/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>
#include "trace.h"

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
	ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that
 * page again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
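/*
 * Illustrative sketch (not part of the original file): after the swap
 * described above, a reader can pull a whole page out of a per cpu buffer
 * with the read_page interface. The prototypes are assumed from the
 * linux/ring_buffer.h of this era; treat this as a sketch, not a template.
 */
#if 0
static void example_read_full_page(struct ring_buffer *buffer, int cpu)
{
	void *data = ring_buffer_alloc_read_page(buffer);
	int ret;

	if (!data)
		return;
	/* full == 0: allow a partially filled page to be returned */
	ret = ring_buffer_read_page(buffer, &data, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/* data now holds a buffer_data_page worth of events */
	}
	ring_buffer_free_read_page(buffer, data);
}
#endif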
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */
enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}
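/*
 * Editorial note (worked example, not in the original file): for a data
 * event, the 5-bit type_len encodes the payload size in RB_ALIGNMENT
 * (4-byte) units. A 12-byte payload is stored with type_len == 3, so
 * rb_event_data_length() returns 3 * 4 + RB_EVNT_HDR_SIZE. Payloads too
 * large to encode this way use type_len == 0 and put the byte count in
 * array[0], with the data starting at array[1] (see rb_event_data()).
 */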
/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 * or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
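/*
 * Illustrative sketch (not part of the original file): a typical producer
 * reserves space, fills in the payload via ring_buffer_event_data(), and
 * commits. Prototypes are assumed from linux/ring_buffer.h; the int
 * payload is made up for the example.
 */
#if 0
static void example_write_event(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	int *payload;

	event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
	if (!event)
		return;	/* recording disabled, or no room in non-overwrite mode */
	payload = ring_buffer_event_data(event);
	*payload = value;
	ring_buffer_unlock_commit(buffer, event);
}
#endif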
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12 bits.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
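/*
 * Editorial note (worked example, not in the original file): with the
 * masks above, the low 20 bits of bpage->write hold the byte index on
 * the page, and the upper bits count updaters. For instance, if write ==
 * 0x300040, the write position is 0x40 (write & RB_WRITE_MASK) and three
 * nested updates have each bumped the updater count via
 * local_add_return(RB_WRITE_INTCNT, &bpage->write).
 */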
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE	(BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}
/*
 * When head_page == tail_page and head == tail, the buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	u64				write_stamp;
	u64				read_stamp;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU they are on, and they only need
 * to worry about interrupts. Reads, however, can happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next	bit 1	  bit 0
 *				-------	 -------
 * Normal page			  0	    0
 * Points to head page		  0	    1
 * New head page		  1	    0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But it is
 * only legitimate temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
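/*
 * Editorial note (worked example, not in the original file): because
 * buffer pages are cache-line aligned, the two low bits of a list
 * pointer are free to carry the flags above. A pointer to the head page
 * is stored as ((unsigned long)&head->list | RB_PAGE_HEAD), and
 * rb_list_head() below recovers the real pointer by masking with
 * ~RB_FLAG_MASK.
 */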
/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}
/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss it in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it, in which case we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It only can increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}
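/*
 * Editorial note (illustrative timeline, not in the original file): the
 * updater-count trick above can be traced by hand. Suppose a writer and
 * an interrupt both race on next_page->write, which starts at 0x10:
 *
 *   writer: old_write = local_add_return(RB_WRITE_INTCNT, ...) -> 0x100010
 *   irq:    old_write = local_add_return(RB_WRITE_INTCNT, ...) -> 0x200010
 *   irq:    local_cmpxchg(write, 0x200010, 0x200000) succeeds, zeroing the index
 *   writer: local_cmpxchg(write, 0x100010, 0x100000) fails and is discarded
 *
 * Only one racing update can win, and the losing cmpxchg is intentionally
 * ignored, which is exactly the (void) casts above.
 */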
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	WARN_ON(!nr_pages);

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages < 2)
		buffer->pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
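/*
 * Illustrative sketch (not part of the original file): allocating and
 * tearing down a buffer. ring_buffer_alloc() is the macro wrapper around
 * __ring_buffer_alloc() declared in linux/ring_buffer.h; the one-megabyte
 * size is an arbitrary example value.
 */
#if 0
static void example_setup_teardown(void)
{
	struct ring_buffer *buffer;

	/* one megabyte per cpu, overwriting old events when full */
	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return;
	/* ... record and read events here ... */
	ring_buffer_free(buffer);
}
#endif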
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
			goto out;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
		goto out;

	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			goto out;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}
  1112. /**
  1113. * ring_buffer_resize - resize the ring buffer
  1114. * @buffer: the buffer to resize.
  1115. * @size: the new size.
  1116. *
  1117. * Minimum size is 2 * BUF_PAGE_SIZE.
  1118. *
  1119. * Returns -1 on failure.
  1120. */
  1121. int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  1122. {
  1123. struct ring_buffer_per_cpu *cpu_buffer;
  1124. unsigned nr_pages, rm_pages, new_pages;
  1125. struct buffer_page *bpage, *tmp;
  1126. unsigned long buffer_size;
  1127. unsigned long addr;
  1128. LIST_HEAD(pages);
  1129. int i, cpu;
  1130. /*
  1131. * Always succeed at resizing a non-existent buffer:
  1132. */
  1133. if (!buffer)
  1134. return size;
  1135. size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1136. size *= BUF_PAGE_SIZE;
  1137. buffer_size = buffer->pages * BUF_PAGE_SIZE;
  1138. /* we need a minimum of two pages */
  1139. if (size < BUF_PAGE_SIZE * 2)
  1140. size = BUF_PAGE_SIZE * 2;
  1141. if (size == buffer_size)
  1142. return size;
  1143. atomic_inc(&buffer->record_disabled);
  1144. /* Make sure all writers are done with this buffer. */
  1145. synchronize_sched();
  1146. mutex_lock(&buffer->mutex);
  1147. get_online_cpus();
  1148. nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
  1149. if (size < buffer_size) {
  1150. /* easy case, just free pages */
  1151. if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
  1152. goto out_fail;
  1153. rm_pages = buffer->pages - nr_pages;
  1154. for_each_buffer_cpu(buffer, cpu) {
  1155. cpu_buffer = buffer->buffers[cpu];
  1156. rb_remove_pages(cpu_buffer, rm_pages);
  1157. }
  1158. goto out;
  1159. }
  1160. /*
  1161. * This is a bit more difficult. We only want to add pages
  1162. * when we can allocate enough for all CPUs. We do this
  1163. * by allocating all the pages and storing them on a local
  1164. * link list. If we succeed in our allocation, then we
  1165. * add these pages to the cpu_buffers. Otherwise we just free
  1166. * them all and return -ENOMEM;
  1167. */
  1168. if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
  1169. goto out_fail;
  1170. new_pages = nr_pages - buffer->pages;
  1171. for_each_buffer_cpu(buffer, cpu) {
  1172. for (i = 0; i < new_pages; i++) {
  1173. bpage = kzalloc_node(ALIGN(sizeof(*bpage),
  1174. cache_line_size()),
  1175. GFP_KERNEL, cpu_to_node(cpu));
  1176. if (!bpage)
  1177. goto free_pages;
  1178. list_add(&bpage->list, &pages);
  1179. addr = __get_free_page(GFP_KERNEL);
  1180. if (!addr)
  1181. goto free_pages;
  1182. bpage->page = (void *)addr;
  1183. rb_init_page(bpage->page);
  1184. }
  1185. }
  1186. for_each_buffer_cpu(buffer, cpu) {
  1187. cpu_buffer = buffer->buffers[cpu];
  1188. rb_insert_pages(cpu_buffer, &pages, new_pages);
  1189. }
  1190. if (RB_WARN_ON(buffer, !list_empty(&pages)))
  1191. goto out_fail;
  1192. out:
  1193. buffer->pages = nr_pages;
  1194. put_online_cpus();
  1195. mutex_unlock(&buffer->mutex);
  1196. atomic_dec(&buffer->record_disabled);
  1197. return size;
  1198. free_pages:
  1199. list_for_each_entry_safe(bpage, tmp, &pages, list) {
  1200. list_del_init(&bpage->list);
  1201. free_buffer_page(bpage);
  1202. }
  1203. put_online_cpus();
  1204. mutex_unlock(&buffer->mutex);
  1205. atomic_dec(&buffer->record_disabled);
  1206. return -ENOMEM;
  1207. /*
  1208. * Something went totally wrong, and we are too paranoid
  1209. * to even clean up the mess.
  1210. */
  1211. out_fail:
  1212. put_online_cpus();
  1213. mutex_unlock(&buffer->mutex);
  1214. atomic_dec(&buffer->record_disabled);
  1215. return -1;
  1216. }
  1217. EXPORT_SYMBOL_GPL(ring_buffer_resize);
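
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller can grow or shrink the buffer at runtime. The "buf" pointer
 * below is a hypothetical buffer from ring_buffer_alloc(). The request
 * is rounded up to whole pages; the resulting size in bytes is returned
 * on success, -1 (or -ENOMEM) on failure.
 *
 *	int ret = ring_buffer_resize(buf, 1024 * 1024);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed\n");
 */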
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
again:
	max_count = cpu_buffer->buffer->pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}

/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */
	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * Otherwise we are an interrupt, and only
	 * want the outermost commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved past next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outermost commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}

static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusion */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add the lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}

/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = ts;
	}

out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned long length, u64 ts,
		  u64 delta, int add_timestamp)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(add_timestamp))
		length += RB_LEN_TIME_EXTEND;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = ts;

	return event;
}

static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int nr_loops = 0;
	int add_timestamp;
	u64 diff;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
again:
	add_timestamp = 0;
	delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);
	diff = ts - cpu_buffer->write_stamp;

	/* make sure this diff is calculated here */
	barrier();

	/* Did the write stamp get updated already? */
	if (likely(ts >= cpu_buffer->write_stamp)) {
		delta = diff;
		if (unlikely(test_time_stamp(delta))) {
			WARN_ONCE(delta > (1ULL << 59),
				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n",
				  (unsigned long long)delta,
				  (unsigned long long)ts,
				  (unsigned long long)cpu_buffer->write_stamp);
			add_timestamp = 1;
		}
	}

	event = __rb_reserve_next(cpu_buffer, length, ts,
				  delta, add_timestamp);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	return event;

out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}

#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

/* Keep this code out of the fast path cache */
static noinline void trace_recursive_fail(void)
{
	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	WARN_ON_ONCE(1);
}

static inline int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	trace_recursive_fail();

	return -1;
}

static inline void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	return event;

out:
	trace_recursive_unlock();

out_nocheck:
	preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
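
/*
 * Usage sketch (illustrative only): the reserve/commit pairing described
 * above. "buf" and "struct my_entry" are hypothetical caller-side names;
 * on NULL nothing was reserved and no commit must follow.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buf, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buf, event);
 */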
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			delta = event->array[0];
			delta <<= TS_SHIFT;
			delta += event->time_delta;
			cpu_buffer->write_stamp += delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}

/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non-committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible to the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
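
/*
 * Usage sketch (illustrative only): reserve, then decide the event is
 * unwanted and discard it instead of committing. "buf", "entry" and
 * keep_entry() are hypothetical stand-ins for caller code.
 *
 *	event = ring_buffer_lock_reserve(buf, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		if (keep_entry(entry))
 *			ring_buffer_unlock_commit(buf, event);
 *		else
 *			ring_buffer_discard_commit(buf, event);
 *	}
 */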
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
out:
	preempt_enable_notrace();

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
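
/*
 * Usage sketch (illustrative only): a one-shot write for callers that
 * already have the payload assembled. "buf" is hypothetical; the return
 * value is 0 on success and -EBUSY otherwise.
 *
 *	u64 stamp = 1234;
 *
 *	if (ring_buffer_write(buf, sizeof(stamp), &stamp))
 *		pr_debug("ring buffer write failed\n");
 */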
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);

/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
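
/*
 * Usage sketch (illustrative only): the disable/synchronize/enable
 * pattern the comments above describe, for a hypothetical buffer "buf".
 * Disables nest, so each disable must be balanced with an enable.
 *
 *	ring_buffer_record_disable(buf);
 *	synchronize_sched();		(wait for in-flight writers)
 *	... read or reset the buffer ...
 *	ring_buffer_record_enable(buf);
 */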
/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
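
/*
 * Usage sketch (illustrative only): combining the counters above to get
 * a racy snapshot of buffer state for a hypothetical buffer "buf"; lock
 * the buffer if the numbers must be exact, as the comments above note.
 *
 *	unsigned long entries = ring_buffer_entries(buf);
 *	unsigned long lost = ring_buffer_overruns(buf);
 *
 *	pr_info("%lu entries readable, %lu overwritten\n", entries, lost);
 */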
  2310. static void rb_iter_reset(struct ring_buffer_iter *iter)
  2311. {
  2312. struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
  2313. /* Iterator usage is expected to have record disabled */
  2314. if (list_empty(&cpu_buffer->reader_page->list)) {
  2315. iter->head_page = rb_set_head_page(cpu_buffer);
  2316. if (unlikely(!iter->head_page))
  2317. return;
  2318. iter->head = iter->head_page->read;
  2319. } else {
  2320. iter->head_page = cpu_buffer->reader_page;
  2321. iter->head = cpu_buffer->reader_page->read;
  2322. }
  2323. if (iter->head)
  2324. iter->read_stamp = cpu_buffer->read_stamp;
  2325. else
  2326. iter->read_stamp = iter->head_page->page->time_stamp;
  2327. iter->cache_reader_page = cpu_buffer->reader_page;
  2328. iter->cache_read = cpu_buffer->read;
  2329. }
  2330. /**
  2331. * ring_buffer_iter_reset - reset an iterator
  2332. * @iter: The iterator to reset
  2333. *
  2334. * Resets the iterator, so that it will start from the beginning
  2335. * again.
  2336. */
  2337. void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
  2338. {
  2339. struct ring_buffer_per_cpu *cpu_buffer;
  2340. unsigned long flags;
  2341. if (!iter)
  2342. return;
  2343. cpu_buffer = iter->cpu_buffer;
  2344. spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
  2345. rb_iter_reset(iter);
  2346. spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
  2347. }
  2348. EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
  2349. /**
  2350. * ring_buffer_iter_empty - check if an iterator has no more to read
  2351. * @iter: The iterator to check
  2352. */
  2353. int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
  2354. {
  2355. struct ring_buffer_per_cpu *cpu_buffer;
  2356. cpu_buffer = iter->cpu_buffer;
  2357. return iter->head_page == cpu_buffer->commit_page &&
  2358. iter->head == rb_commit_index(cpu_buffer);
  2359. }
  2360. EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
  2361. static void
  2362. rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
  2363. struct ring_buffer_event *event)
  2364. {
  2365. u64 delta;
  2366. switch (event->type_len) {
  2367. case RINGBUF_TYPE_PADDING:
  2368. return;
  2369. case RINGBUF_TYPE_TIME_EXTEND:
  2370. delta = event->array[0];
  2371. delta <<= TS_SHIFT;
  2372. delta += event->time_delta;
  2373. cpu_buffer->read_stamp += delta;
  2374. return;
  2375. case RINGBUF_TYPE_TIME_STAMP:
  2376. /* FIXME: not implemented */
  2377. return;
  2378. case RINGBUF_TYPE_DATA:
  2379. cpu_buffer->read_stamp += event->time_delta;
  2380. return;
  2381. default:
  2382. BUG();
  2383. }
  2384. return;
  2385. }
  2386. static void
  2387. rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
  2388. struct ring_buffer_event *event)
  2389. {
  2390. u64 delta;
  2391. switch (event->type_len) {
  2392. case RINGBUF_TYPE_PADDING:
  2393. return;
  2394. case RINGBUF_TYPE_TIME_EXTEND:
  2395. delta = event->array[0];
  2396. delta <<= TS_SHIFT;
  2397. delta += event->time_delta;
  2398. iter->read_stamp += delta;
  2399. return;
  2400. case RINGBUF_TYPE_TIME_STAMP:
  2401. /* FIXME: not implemented */
  2402. return;
  2403. case RINGBUF_TYPE_DATA:
  2404. iter->read_stamp += event->time_delta;
  2405. return;
  2406. default:
  2407. BUG();
  2408. }
  2409. return;
  2410. }
  2411. static struct buffer_page *
  2412. rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
  2413. {
  2414. struct buffer_page *reader = NULL;
  2415. unsigned long overwrite;
  2416. unsigned long flags;
  2417. int nr_loops = 0;
  2418. int ret;
  2419. local_irq_save(flags);
  2420. arch_spin_lock(&cpu_buffer->lock);
  2421. again:
  2422. /*
  2423. * This should normally only loop twice. But because the
  2424. * start of the reader inserts an empty page, it causes
  2425. * a case where we will loop three times. There should be no
  2426. * reason to loop four times (that I know of).
  2427. */
  2428. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
  2429. reader = NULL;
  2430. goto out;
  2431. }
  2432. reader = cpu_buffer->reader_page;
  2433. /* If there's more to read, return this page */
  2434. if (cpu_buffer->reader_page->read < rb_page_size(reader))
  2435. goto out;
  2436. /* Never should we have an index greater than the size */
  2437. if (RB_WARN_ON(cpu_buffer,
  2438. cpu_buffer->reader_page->read > rb_page_size(reader)))
  2439. goto out;
  2440. /* check if we caught up to the tail */
  2441. reader = NULL;
  2442. if (cpu_buffer->commit_page == cpu_buffer->reader_page)
  2443. goto out;
  2444. /*
  2445. * Reset the reader page to size zero.
  2446. */
  2447. local_set(&cpu_buffer->reader_page->write, 0);
  2448. local_set(&cpu_buffer->reader_page->entries, 0);
  2449. local_set(&cpu_buffer->reader_page->page->commit, 0);
  2450. cpu_buffer->reader_page->real_end = 0;
  2451. spin:
  2452. /*
  2453. * Splice the empty reader page into the list around the head.
  2454. */
  2455. reader = rb_set_head_page(cpu_buffer);
  2456. cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
  2457. cpu_buffer->reader_page->list.prev = reader->list.prev;
  2458. /*
  2459. * cpu_buffer->pages just needs to point to the buffer, it
  2460. * has no specific buffer page to point to. Lets move it out
  2461. * of our way so we don't accidently swap it.
  2462. */
  2463. cpu_buffer->pages = reader->list.prev;
  2464. /* The reader page will be pointing to the new head */
  2465. rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
  2466. /*
  2467. * We want to make sure we read the overruns after we set up our
  2468. * pointers to the next object. The writer side does a
  2469. * cmpxchg to cross pages which acts as the mb on the writer
  2470. * side. Note, the reader will constantly fail the swap
  2471. * while the writer is updating the pointers, so this
  2472. * guarantees that the overwrite recorded here is the one we
  2473. * want to compare with the last_overrun.
  2474. */
  2475. smp_mb();
  2476. overwrite = local_read(&(cpu_buffer->overrun));
  2477. /*
  2478. * Here's the tricky part.
  2479. *
  2480. * We need to move the pointer past the header page.
  2481. * But we can only do that if a writer is not currently
  2482. * moving it. The page before the header page has the
  2483. * flag bit '1' set if it is pointing to the page we want.
  2484. * but if the writer is in the process of moving it
  2485. * than it will be '2' or already moved '0'.
  2486. */
  2487. ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
  2488. /*
  2489. * If we did not convert it, then we must try again.
  2490. */
  2491. if (!ret)
  2492. goto spin;
  2493. /*
  2494. * Yeah! We succeeded in replacing the page.
  2495. *
  2496. * Now make the new head point back to the reader page.
  2497. */
  2498. rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
  2499. rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
  2500. /* Finally update the reader page to the new head */
  2501. cpu_buffer->reader_page = reader;
  2502. rb_reset_reader_page(cpu_buffer);
  2503. if (overwrite != cpu_buffer->last_overrun) {
  2504. cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
  2505. cpu_buffer->last_overrun = overwrite;
  2506. }
  2507. goto again;
  2508. out:
  2509. arch_spin_unlock(&cpu_buffer->lock);
  2510. local_irq_restore(flags);
  2511. return reader;
  2512. }
  2513. static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
  2514. {
  2515. struct ring_buffer_event *event;
  2516. struct buffer_page *reader;
  2517. unsigned length;
  2518. reader = rb_get_reader_page(cpu_buffer);
  2519. /* This function should not be called when buffer is empty */
  2520. if (RB_WARN_ON(cpu_buffer, !reader))
  2521. return;
  2522. event = rb_reader_event(cpu_buffer);
  2523. if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
  2524. cpu_buffer->read++;
  2525. rb_update_read_stamp(cpu_buffer, event);
  2526. length = rb_event_length(event);
  2527. cpu_buffer->reader_page->read += length;
  2528. }
  2529. static void rb_advance_iter(struct ring_buffer_iter *iter)
  2530. {
  2531. struct ring_buffer_per_cpu *cpu_buffer;
  2532. struct ring_buffer_event *event;
  2533. unsigned length;
  2534. cpu_buffer = iter->cpu_buffer;
  2535. /*
  2536. * Check if we are at the end of the buffer.
  2537. */
  2538. if (iter->head >= rb_page_size(iter->head_page)) {
  2539. /* discarded commits can make the page empty */
  2540. if (iter->head_page == cpu_buffer->commit_page)
  2541. return;
  2542. rb_inc_iter(iter);
  2543. return;
  2544. }
  2545. event = rb_iter_head_event(iter);
  2546. length = rb_event_length(event);
  2547. /*
  2548. * This should not be called to advance the header if we are
  2549. * at the tail of the buffer.
  2550. */
  2551. if (RB_WARN_ON(cpu_buffer,
  2552. (iter->head_page == cpu_buffer->commit_page) &&
  2553. (iter->head + length > rb_commit_index(cpu_buffer))))
  2554. return;
  2555. rb_update_iter_read_stamp(iter, event);
  2556. iter->head += length;
  2557. /* check for end of page padding */
  2558. if ((iter->head >= rb_page_size(iter->head_page)) &&
  2559. (iter->head_page != cpu_buffer->commit_page))
  2560. rb_advance_iter(iter);
  2561. }
  2562. static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
  2563. {
  2564. return cpu_buffer->lost_events;
  2565. }
  2566. static struct ring_buffer_event *
  2567. rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
  2568. unsigned long *lost_events)
  2569. {
  2570. struct ring_buffer_event *event;
  2571. struct buffer_page *reader;
  2572. int nr_loops = 0;
  2573. again:
  2574. /*
  2575. * We repeat when a time extend is encountered.
  2576. * Since the time extend is always attached to a data event,
  2577. * we should never loop more than once.
  2578. * (We never hit the following condition more than twice).
  2579. */
  2580. if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
  2581. return NULL;
  2582. reader = rb_get_reader_page(cpu_buffer);
  2583. if (!reader)
  2584. return NULL;
  2585. event = rb_reader_event(cpu_buffer);
  2586. switch (event->type_len) {
  2587. case RINGBUF_TYPE_PADDING:
  2588. if (rb_null_event(event))
  2589. RB_WARN_ON(cpu_buffer, 1);
  2590. /*
  2591. * Because the writer could be discarding every
  2592. * event it creates (which would probably be bad)
  2593. * if we were to go back to "again" then we may never
  2594. * catch up, and will trigger the warn on, or lock
  2595. * the box. Return the padding, and we will release
  2596. * the current locks, and try again.
  2597. */
  2598. return event;
  2599. case RINGBUF_TYPE_TIME_EXTEND:
  2600. /* Internal data, OK to advance */
  2601. rb_advance_reader(cpu_buffer);
  2602. goto again;
  2603. case RINGBUF_TYPE_TIME_STAMP:
  2604. /* FIXME: not implemented */
  2605. rb_advance_reader(cpu_buffer);
  2606. goto again;
  2607. case RINGBUF_TYPE_DATA:
  2608. if (ts) {
  2609. *ts = cpu_buffer->read_stamp + event->time_delta;
  2610. ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
  2611. cpu_buffer->cpu, ts);
  2612. }
  2613. if (lost_events)
  2614. *lost_events = rb_lost_events(cpu_buffer);
  2615. return event;
  2616. default:
  2617. BUG();
  2618. }
  2619. return NULL;
  2620. }
  2621. EXPORT_SYMBOL_GPL(ring_buffer_peek);
  2622. static struct ring_buffer_event *
  2623. rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
  2624. {
  2625. struct ring_buffer *buffer;
  2626. struct ring_buffer_per_cpu *cpu_buffer;
  2627. struct ring_buffer_event *event;
  2628. int nr_loops = 0;
  2629. cpu_buffer = iter->cpu_buffer;
        buffer = cpu_buffer->buffer;

        /*
         * Check if someone performed a consuming read to
         * the buffer. A consuming read invalidates the iterator
         * and we need to reset the iterator in this case.
         */
        if (unlikely(iter->cache_read != cpu_buffer->read ||
                     iter->cache_reader_page != cpu_buffer->reader_page))
                rb_iter_reset(iter);

 again:
        if (ring_buffer_iter_empty(iter))
                return NULL;

        /*
         * We repeat when a time extend is encountered.
         * Since the time extend is always attached to a data event,
         * we should never loop more than once.
         * (We never hit the following condition more than twice).
         */
        if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
                return NULL;

        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;

        if (iter->head >= local_read(&iter->head_page->page->commit)) {
                rb_inc_iter(iter);
                goto again;
        }

        event = rb_iter_head_event(iter);

        switch (event->type_len) {
        case RINGBUF_TYPE_PADDING:
                if (rb_null_event(event)) {
                        rb_inc_iter(iter);
                        goto again;
                }
                rb_advance_iter(iter);
                return event;

        case RINGBUF_TYPE_TIME_EXTEND:
                /* Internal data, OK to advance */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_TIME_STAMP:
                /* FIXME: not implemented */
                rb_advance_iter(iter);
                goto again;

        case RINGBUF_TYPE_DATA:
                if (ts) {
                        *ts = iter->read_stamp + event->time_delta;
                        ring_buffer_normalize_time_stamp(buffer,
                                                         cpu_buffer->cpu, ts);
                }
                return event;

        default:
                BUG();
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
        /*
         * If an NMI die dumps out the content of the ring buffer
         * do not grab locks. We also permanently disable the ring
         * buffer. A one-time deal is all you get from reading
         * the ring buffer from an NMI.
         */
        if (likely(!in_nmi()))
                return 1;

        tracing_off_permanent();
        return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
                 unsigned long *lost_events)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        unsigned long flags;
        int dolock;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return NULL;

        dolock = rb_ok_to_lock();
 again:
        local_irq_save(flags);
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                rb_advance_reader(cpu_buffer);
        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);

        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                goto again;

        return event;
}
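
/*
 * Example usage (an illustrative sketch; process_event() stands in for
 * caller-supplied code):
 *
 *        u64 ts;
 *        struct ring_buffer_event *event;
 *
 *        event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *        if (event)
 *                process_event(ring_buffer_event_data(event), ts);
 *
 * The event is not consumed; the next peek or consuming read on this
 * CPU will see the same event again.
 */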
/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        struct ring_buffer_event *event;
        unsigned long flags;

 again:
        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        event = rb_iter_peek(iter, ts);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                goto again;

        return event;
}
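
/*
 * Example usage (an illustrative sketch): look at the next event of an
 * iterator set up by ring_buffer_read_start() without advancing it:
 *
 *        u64 ts;
 *        struct ring_buffer_event *event = ring_buffer_iter_peek(iter, &ts);
 *
 *        if (event)
 *                pr_info("next event: %u bytes\n",
 *                        ring_buffer_event_length(event));
 */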
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * This means that sequential reads will keep returning a different event,
 * and will eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
                    unsigned long *lost_events)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event = NULL;
        unsigned long flags;
        int dolock;

        dolock = rb_ok_to_lock();

 again:
        /* might be called in atomic */
        preempt_disable();

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                goto out;

        cpu_buffer = buffer->buffers[cpu];
        local_irq_save(flags);
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);

        event = rb_buffer_peek(cpu_buffer, ts, lost_events);
        if (event) {
                cpu_buffer->lost_events = 0;
                rb_advance_reader(cpu_buffer);
        }

        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);

 out:
        preempt_enable();

        if (event && event->type_len == RINGBUF_TYPE_PADDING)
                goto again;

        return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
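
/*
 * Example usage (an illustrative sketch; handle_entry() stands in for
 * caller-supplied code):
 *
 *        struct ring_buffer_event *event;
 *        unsigned long lost;
 *        u64 ts;
 *
 *        while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *                if (lost)
 *                        pr_warn("lost %lu events\n", lost);
 *                handle_entry(ring_buffer_event_data(event), ts);
 *        }
 *
 * The loop ends when the CPU buffer is (momentarily) empty.
 */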
/**
 * ring_buffer_read_prepare - Prepare for a non-consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_read_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_iter *iter;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return NULL;

        iter = kmalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return NULL;

        cpu_buffer = buffer->buffers[cpu];

        iter->cpu_buffer = cpu_buffer;

        atomic_inc(&cpu_buffer->record_disabled);

        return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized. Afterwards, ring_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
        synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
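
/*
 * Example usage (an illustrative sketch; the NR_CPUS-sized array is only
 * for illustration): prepare iterators for every online CPU first, so
 * the synchronization cost is paid once rather than once per CPU:
 *
 *        struct ring_buffer_iter *iters[NR_CPUS];
 *        int cpu;
 *
 *        for_each_online_cpu(cpu)
 *                iters[cpu] = ring_buffer_read_prepare(buffer, cpu);
 *        ring_buffer_read_prepare_sync();
 *        for_each_online_cpu(cpu)
 *                ring_buffer_read_start(iters[cpu]);
 */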
/**
 * ring_buffer_read_start - start a non-consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_read_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;

        if (!iter)
                return;

        cpu_buffer = iter->cpu_buffer;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        arch_spin_lock(&cpu_buffer->lock);
        rb_iter_reset(iter);
        arch_spin_unlock(&cpu_buffer->lock);
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_prepare
 *
 * This re-enables recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

        atomic_dec(&cpu_buffer->record_disabled);
        kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
        unsigned long flags;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
        event = rb_iter_peek(iter, ts);
        if (!event)
                goto out;

        if (event->type_len == RINGBUF_TYPE_PADDING)
                goto again;

        rb_advance_iter(iter);
 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
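
/*
 * Example usage (an illustrative sketch; show_entry() stands in for
 * caller-supplied code): a complete non-consuming pass over one CPU:
 *
 *        struct ring_buffer_iter *iter;
 *        struct ring_buffer_event *event;
 *        u64 ts;
 *
 *        iter = ring_buffer_read_prepare(buffer, cpu);
 *        if (!iter)
 *                return;
 *        ring_buffer_read_prepare_sync();
 *        ring_buffer_read_start(iter);
 *        while ((event = ring_buffer_read(iter, &ts)))
 *                show_entry(ring_buffer_event_data(event), ts);
 *        ring_buffer_read_finish(iter);
 */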
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
        return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
        rb_head_page_deactivate(cpu_buffer);

        cpu_buffer->head_page
                = list_entry(cpu_buffer->pages, struct buffer_page, list);
        local_set(&cpu_buffer->head_page->write, 0);
        local_set(&cpu_buffer->head_page->entries, 0);
        local_set(&cpu_buffer->head_page->page->commit, 0);

        cpu_buffer->head_page->read = 0;

        cpu_buffer->tail_page = cpu_buffer->head_page;
        cpu_buffer->commit_page = cpu_buffer->head_page;

        INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
        local_set(&cpu_buffer->reader_page->write, 0);
        local_set(&cpu_buffer->reader_page->entries, 0);
        local_set(&cpu_buffer->reader_page->page->commit, 0);
        cpu_buffer->reader_page->read = 0;

        local_set(&cpu_buffer->commit_overrun, 0);
        local_set(&cpu_buffer->overrun, 0);
        local_set(&cpu_buffer->entries, 0);
        local_set(&cpu_buffer->committing, 0);
        local_set(&cpu_buffer->commits, 0);
        cpu_buffer->read = 0;

        cpu_buffer->write_stamp = 0;
        cpu_buffer->read_stamp = 0;

        cpu_buffer->lost_events = 0;
        cpu_buffer->last_overrun = 0;

        rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        unsigned long flags;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return;

        atomic_inc(&cpu_buffer->record_disabled);

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
                goto out;

        arch_spin_lock(&cpu_buffer->lock);

        rb_reset_cpu(cpu_buffer);

        arch_spin_unlock(&cpu_buffer->lock);

 out:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
        int cpu;

        for_each_buffer_cpu(buffer, cpu)
                ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
        int dolock;
        int cpu;
        int ret;

        dolock = rb_ok_to_lock();

        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
                local_irq_save(flags);
                if (dolock)
                        spin_lock(&cpu_buffer->reader_lock);
                ret = rb_per_cpu_empty(cpu_buffer);
                if (dolock)
                        spin_unlock(&cpu_buffer->reader_lock);
                local_irq_restore(flags);

                if (!ret)
                        return 0;
        }

        return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
        int dolock;
        int ret;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 1;

        dolock = rb_ok_to_lock();

        cpu_buffer = buffer->buffers[cpu];
        local_irq_save(flags);
        if (dolock)
                spin_lock(&cpu_buffer->reader_lock);
        ret = rb_per_cpu_empty(cpu_buffer);
        if (dolock)
                spin_unlock(&cpu_buffer->reader_lock);
        local_irq_restore(flags);

        return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
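
/*
 * Example usage (an illustrative sketch; drain_cpu() is a hypothetical
 * helper): skip CPUs with nothing pending. As noted above, the check is
 * racy against a live producer, so treat it as a hint only:
 *
 *        int cpu;
 *
 *        for_each_online_cpu(cpu) {
 *                if (ring_buffer_empty_cpu(buffer, cpu))
 *                        continue;
 *                drain_cpu(buffer, cpu);
 *        }
 */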
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                         struct ring_buffer *buffer_b, int cpu)
{
        struct ring_buffer_per_cpu *cpu_buffer_a;
        struct ring_buffer_per_cpu *cpu_buffer_b;
        int ret = -EINVAL;

        if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
            !cpumask_test_cpu(cpu, buffer_b->cpumask))
                goto out;

        /* At least make sure the two buffers are somewhat the same */
        if (buffer_a->pages != buffer_b->pages)
                goto out;

        ret = -EAGAIN;

        if (ring_buffer_flags != RB_BUFFERS_ON)
                goto out;

        if (atomic_read(&buffer_a->record_disabled))
                goto out;

        if (atomic_read(&buffer_b->record_disabled))
                goto out;

        cpu_buffer_a = buffer_a->buffers[cpu];
        cpu_buffer_b = buffer_b->buffers[cpu];

        if (atomic_read(&cpu_buffer_a->record_disabled))
                goto out;

        if (atomic_read(&cpu_buffer_b->record_disabled))
                goto out;

        /*
         * We can't do a synchronize_sched here because this
         * function can be called in atomic context.
         * Normally this will be called from the same CPU as cpu.
         * If not it's up to the caller to protect this.
         */
        atomic_inc(&cpu_buffer_a->record_disabled);
        atomic_inc(&cpu_buffer_b->record_disabled);

        ret = -EBUSY;
        if (local_read(&cpu_buffer_a->committing))
                goto out_dec;
        if (local_read(&cpu_buffer_b->committing))
                goto out_dec;

        buffer_a->buffers[cpu] = cpu_buffer_b;
        buffer_b->buffers[cpu] = cpu_buffer_a;

        cpu_buffer_b->buffer = buffer_a;
        cpu_buffer_a->buffer = buffer_b;

        ret = 0;

 out_dec:
        atomic_dec(&cpu_buffer_a->record_disabled);
        atomic_dec(&cpu_buffer_b->record_disabled);
 out:
        return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
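
/*
 * Example usage (an illustrative sketch): snapshot one CPU of a live
 * buffer into an equally sized spare buffer, then read the snapshot at
 * leisure. read_snapshot() is a hypothetical helper; on failure the
 * swap returns -EINVAL, -EAGAIN or -EBUSY as described above.
 *
 *        int err = ring_buffer_swap_cpu(spare_buffer, live_buffer, cpu);
 *
 *        if (!err)
 *                read_snapshot(spare_buffer, cpu);
 */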
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
        struct buffer_data_page *bpage;
        unsigned long addr;

        addr = __get_free_page(GFP_KERNEL);
        if (!addr)
                return NULL;

        bpage = (void *)addr;

        rb_init_page(bpage);

        return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
        free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * For example:
 *        rpage = ring_buffer_alloc_read_page(buffer);
 *        if (!rpage)
 *                return error;
 *        ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *        if (ret >= 0)
 *                process_page(rpage, ret);
 *
 * When @full is set, the read will fail (return < 0) unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
                          void **data_page, size_t len, int cpu, int full)
{
        struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
        struct ring_buffer_event *event;
        struct buffer_data_page *bpage;
        struct buffer_page *reader;
        unsigned long missed_events;
        unsigned long flags;
        unsigned int commit;
        unsigned int read;
        u64 save_timestamp;
        int ret = -1;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                goto out;

        /*
         * If len is not big enough to hold the page header, then
         * we can not copy anything.
         */
        if (len <= BUF_PAGE_HDR_SIZE)
                goto out;

        len -= BUF_PAGE_HDR_SIZE;

        if (!data_page)
                goto out;

        bpage = *data_page;
        if (!bpage)
                goto out;

        spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

        reader = rb_get_reader_page(cpu_buffer);
        if (!reader)
                goto out_unlock;

        event = rb_reader_event(cpu_buffer);

        read = reader->read;
        commit = rb_page_commit(reader);

        /* Check if any events were dropped */
        missed_events = cpu_buffer->lost_events;

        /*
         * If this page has been partially read or
         * if len is not big enough to read the rest of the page or
         * a writer is still on the page, then
         * we must copy the data from the page to the buffer.
         * Otherwise, we can simply swap the page with the one passed in.
         */
        if (read || (len < (commit - read)) ||
            cpu_buffer->reader_page == cpu_buffer->commit_page) {
                struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
                unsigned int rpos = read;
                unsigned int pos = 0;
                unsigned int size;

                if (full)
                        goto out_unlock;

                if (len > (commit - read))
                        len = (commit - read);

                /* Always keep the time extend and data together */
                size = rb_event_ts_length(event);

                if (len < size)
                        goto out_unlock;

                /* save the current timestamp, since the user will need it */
                save_timestamp = cpu_buffer->read_stamp;

                /* Need to copy one event at a time */
                do {
                        memcpy(bpage->data + pos, rpage->data + rpos, size);

                        len -= size;

                        rb_advance_reader(cpu_buffer);
                        rpos = reader->read;
                        pos += size;

                        if (rpos >= commit)
                                break;

                        event = rb_reader_event(cpu_buffer);
                        /* Always keep the time extend and data together */
                        size = rb_event_ts_length(event);
                } while (len > size);

                /* update bpage */
                local_set(&bpage->commit, pos);
                bpage->time_stamp = save_timestamp;

                /* we copied everything to the beginning */
                read = 0;
        } else {
                /* update the entry counter */
                cpu_buffer->read += rb_page_entries(reader);

                /* swap the pages */
                rb_init_page(bpage);
                bpage = reader->page;
                reader->page = *data_page;
                local_set(&reader->write, 0);
                local_set(&reader->entries, 0);
                reader->read = 0;
                *data_page = bpage;

                /*
                 * Use the real_end for the data size.
                 * This gives us a chance to store the lost events
                 * on the page.
                 */
                if (reader->real_end)
                        local_set(&bpage->commit, reader->real_end);
        }
        ret = read;

        cpu_buffer->lost_events = 0;

        commit = local_read(&bpage->commit);
        /*
         * Set a flag in the commit field if we lost events
         */
        if (missed_events) {
                /* If there is room at the end of the page to save the
                 * missed events, then record it there.
                 */
                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
                        memcpy(&bpage->data[commit], &missed_events,
                               sizeof(missed_events));
                        local_add(RB_MISSED_STORED, &bpage->commit);
                        commit += sizeof(missed_events);
                }
                local_add(RB_MISSED_EVENTS, &bpage->commit);
        }

        /*
         * This page may be off to user land. Zero it out here.
         */
        if (commit < BUF_PAGE_SIZE)
                memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
        return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
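
/*
 * Example usage (an illustrative sketch; write_to_user() is a
 * hypothetical consumer): repeatedly extract whole pages from one CPU:
 *
 *        void *rpage = ring_buffer_alloc_read_page(buffer);
 *        int ret;
 *
 *        if (!rpage)
 *                return -ENOMEM;
 *        while ((ret = ring_buffer_read_page(buffer, &rpage,
 *                                            PAGE_SIZE, cpu, 0)) >= 0)
 *                write_to_user(rpage, ret);
 *        ring_buffer_free_read_page(buffer, rpage);
 */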
#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
{
        unsigned long *p = filp->private_data;
        char buf[64];
        int r;

        if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
                r = sprintf(buf, "permanently disabled\n");
        else
                r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
{
        unsigned long *p = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        if (val)
                set_bit(RB_BUFFERS_ON_BIT, p);
        else
                clear_bit(RB_BUFFERS_ON_BIT, p);

        (*ppos)++;

        return cnt;
}

static const struct file_operations rb_simple_fops = {
        .open           = tracing_open_generic,
        .read           = rb_simple_read,
        .write          = rb_simple_write,
};
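
/*
 * Example usage (an illustrative sketch, from user space): the control
 * file created below typically shows up under the tracing debugfs
 * directory, e.g.:
 *
 *        echo 0 > /sys/kernel/debug/tracing/tracing_on
 *        cat /sys/kernel/debug/tracing/tracing_on
 */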
static __init int rb_init_debugfs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("tracing_on", 0644, d_tracer,
                          &ring_buffer_flags, &rb_simple_fops);

        return 0;
}

fs_initcall(rb_init_debugfs);
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu)
{
        struct ring_buffer *buffer =
                container_of(self, struct ring_buffer, cpu_notify);
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (cpumask_test_cpu(cpu, buffer->cpumask))
                        return NOTIFY_OK;

                buffer->buffers[cpu] =
                        rb_allocate_cpu_buffer(buffer, cpu);
                if (!buffer->buffers[cpu]) {
                        WARN(1, "failed to allocate ring buffer on CPU %ld\n",
                             cpu);
                        return NOTIFY_OK;
                }
                smp_wmb();
                cpumask_set_cpu(cpu, buffer->cpumask);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                /*
                 * Do nothing.
                 *  If we were to free the buffer, then the user would
                 *  lose any trace that was in the buffer.
                 */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}
#endif