header.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

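/*
 * A feat_fd writes either straight to a file descriptor (fd mode) or
 * into a growable memory buffer (buf mode, used for pipe output where
 * seeking back to patch sizes is impossible); exactly one of buf/fd is
 * active at a time.
 */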
struct feat_fd {
	struct perf_header	*ph;
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;
	size_t			size;
	struct perf_evsel	*events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

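/*
 * On-disk bitmap layout: a u64 bit count followed by BITS_TO_U64(size)
 * raw u64 words, so the reader can size its allocation before reading.
 */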
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

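/*
 * On-disk string layout: a u32 length (including the terminating NUL,
 * rounded up to NAME_ALIGN) followed by the string itself, zero-padded
 * out to that aligned length.
 */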
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);

	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);

	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes, so the actual
		 * strlen of buf may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

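/*
 * Pull the CPU description out of /proc/cpuinfo: find the first line
 * starting with the arch-specific key, keep the value after the ": "
 * separator, and collapse repeated whitespace. For example, on x86 a
 * line like "model name : Intel(R) Core(TM) i7" yields
 * "Intel(R) Core(TM) i7".
 */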
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;

			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;

		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(struct feat_fd *ff,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique ids for this event;
		 * there is one id per instance of an event.
		 *
		 * copy into an nri to be independent of the
		 * type of evsel->ids.
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

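/*
 * On-disk CPU_TOPOLOGY layout: a count of unique core-sibling strings
 * followed by the strings, the same pair for thread siblings, and then
 * one (core_id, socket_id) pair per available CPU.
 */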
static int write_cpu_topology(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}

	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(struct feat_fd *ff, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total = 0, mem_free = 0, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(ff, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(ff, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(ff, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(ff, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(ff, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */
static int write_pmu_mappings(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs, avoiding lseek()
	 * so this works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

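/*
 * Return convention: 0 on success, 1 if this cache index does not exist
 * for the CPU (the caller stops probing deeper levels), -1 on error.
 */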
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		/* on failure, cache->map was never allocated */
		free(cache->size);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

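/*
 * Probe every cache index of every CPU and keep only unique entries;
 * caches shared between CPUs compare equal (same level, geometry and
 * shared_cpu_list), so each physical cache is recorded once.
 */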
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct perf_evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}

static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			"failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store a map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)
		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}

static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

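/*
 * Parse the EVENT_DESC section written by write_event_desc() above.
 * The on-file attr size may differ from the running perf's struct
 * perf_event_attr, so the full on-file attr is read into a scratch
 * buffer and only min(on-file, local) bytes are copied out.
 */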
static struct perf_evsel *read_event_desc(struct feat_fd *ff)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}


static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);

	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}

static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr    = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}
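
/*
 * Process one build id table entry: resolve the machine from bev->pid,
 * classify the DSO as kernel/guest-kernel/user from the cpumode bits in
 * the event header, and attach the build id to the (new or existing) DSO.
 */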
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
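
/*
 * Before the a1645ce1 changeset (see the comment in
 * perf_header__read_build_ids()), struct build_id_event had no pid field:
 * the on-disk record was just the perf_event_header, the padded build id
 * and the filename. This reader consumes that old layout and fills in a
 * plausible pid from header.misc.
 */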
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8	 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char	 filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}
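
/*
 * For example, FEAT_PROCESS_STR_FUN(hostname, hostname) expands to
 * (roughly):
 *
 *	static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */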
FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);

static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct perf_evlist *evlist,
			    struct perf_evsel *event)
{
	struct perf_evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct perf_evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/*
		 * Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */
		ff->events = events;
	}

	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}

static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
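
/*
 * On-disk layout of the CPU_TOPOLOGY section, as consumed below:
 *
 *	u32 nr_sibling_cores, followed by that many strings;
 *	u32 nr_sibling_threads, followed by that many strings;
 *	one { u32 core_id; u32 socket_id; } pair per available CPU.
 *
 * The trailing per-CPU pairs are absent in headers written by older perf
 * tools, which is why the ff->size check below bails out early.
 */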
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the number of cpus.
	 * The socket_id number might be higher than the number of cpus.
	 * This depends on the configuration.
	 */
	if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}
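
/*
 * On-disk layout of the NUMA_TOPOLOGY section, as consumed below:
 *
 *	u32 node count, then per node:
 *	u32 node number, u64 mem_total (kB), u64 mem_free (kB),
 *	and a string holding the node's cpu list.
 */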
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = cpu_map__new(str);
		if (!n->map)
			goto error;

		free(str);
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}
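
/*
 * On-disk layout of the PMU_MAPPINGS section, as consumed below:
 *
 *	u32 pmu_num, then pmu_num pairs of { u32 type; string name; }.
 *
 * The pairs are stored in env.pmu_mappings as consecutive NUL-terminated
 * "type:name" strings, which is the format print_pmu_mappings() parses.
 */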
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

	ff->ph->env.nr_pmu_mappings = pmu_num;
	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (pmu_num) {
		if (do_read_u32(ff, &type))
			goto error;

		name = do_read_string(ff);
		if (!name)
			goto error;

		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
			ff->ph->env.msr_pmu_type = type;

		free(name);
		pmu_num--;
	}
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
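
/*
 * On-disk layout of the GROUP_DESC section, as consumed below:
 *
 *	u32 nr_groups, then per group:
 *	string name, u32 leader_idx, u32 nr_members.
 *
 * The descriptors are replayed against the evlist to restore each event's
 * leader pointer and member count.
 */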
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct perf_evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->nr_members = desc[i].nr_members;

			if (nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

		#define _R(v)					\
			if (do_read_u32(ff, &c.v))		\
				goto out_free_caches;

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
		#undef _R

		#define _R(v)					\
			c.v = do_read_string(ff);		\
			if (!c.v)				\
				goto out_free_caches;
		_R(type)
		_R(size)
		_R(map)
		#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}
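
/*
 * On-disk layout of the MEM_TOPOLOGY section, as consumed below:
 *
 *	u64 version (only version 1 is supported), u64 block size,
 *	u64 node count, then per node:
 *	u64 node number, u64 bitmap size, and the memory bitmap itself.
 */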
static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

		#define _R(v)				\
			if (do_read_u64(ff, &n.v))	\
				goto out;

		_R(node)
		_R(size)

		#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize    = bsize;
	ff->ph->env.memory_nodes    = nodes;
	ff->ph->env.nr_memory_nodes = nr;

	ret = 0;
out:
	if (ret)
		free(nodes);
	return ret;
}

struct feature_ops {
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;
	bool synthesize;
};

#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL

static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
};
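
/*
 * FEAT_OPR entries set .synthesize, so those features are also emitted as
 * PERF_RECORD_HEADER_FEATURE records in pipe mode (see
 * perf_event__synthesize_features() below); FEAT_OPN entries are only
 * written to the on-disk feature sections.
 */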

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on : %s", ctime(&st.st_ctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}

static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, the reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}
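
/*
 * perf_session__write_header() lays the file out as:
 *
 *	struct perf_file_header at offset 0,
 *	the per-event sample id arrays,
 *	one struct perf_file_attr per event,
 *	the sample data, and finally (at exit) the feature sections
 *	written by perf_header__adds_write() at feat_offset.
 */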
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used instead. But given that hdr_sz can vary
 * based on ABI revisions, we need to try each known size in both byte
 * orders to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the host recording the samples and the host parsing them
 * is the same. This is not always the case, given that the pipe output
 * may well be redirected into a file and analyzed on a different machine
 * with possibly a different endianness and perf_event ABI revisions in
 * the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}
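
/*
 * The legacy magic (__perf_magic1) is a plain character string, so it
 * cannot encode the writer's byte order -- hence the try_all_*_abis()
 * probing above. The v2 magic (__perf_magic2) is compared as a u64, so a
 * byte-swapped file shows up as __perf_magic2_sw and the reader can set
 * needs_swap directly, as check_magic_endian() does below.
 */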
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset = header->data.offset;
	ph->data_size	= header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}
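
/*
 * perf_event_attr grew over the ABI revisions, and attr->size on file
 * records how big the writer's attr was. read_attr() therefore reads the
 * guaranteed VER0 prefix first, then just the extra bytes the file
 * actually carries: an on-file size of 0 is treated as ABI0, and a size
 * larger than the running tool's attr is rejected as a newer, unsupported
 * ABI.
 */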
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;
	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct tep_handle *pevent)
{
	struct event_format *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct tep_handle *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}
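
/*
 * In pipe mode there is no seekable feature section, so each feature with
 * a synthesize-capable writer is wrapped in a PERF_RECORD_HEADER_FEATURE
 * record instead; a final record carrying HEADER_LAST_FEATURE marks the
 * end of the feature stream for the reader.
 */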
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header: %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}

int perf_event__process_feature(struct perf_tool *tool,
				union perf_event *event,
				struct perf_session *session)
{
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(event->header);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct event_update_event *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}
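
/*
 * PERF_RECORD_EVENT_UPDATE records carry per-event details that the attr
 * record does not: unit, scale, name and the owned cpu map. Each record is
 * keyed by the event's first sample id (evsel->id[0]), which is how
 * perf_event__process_event_update() finds the evsel again on the read
 * side.
 */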
int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->unit, size);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct event_update_event_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->name, len);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *)ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_session *session,
				 perf_event__handler_t process)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(session->evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
						  evsel->id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

static bool has_unit(struct perf_evsel *counter)
{
	return counter->unit && *counter->unit;
}

static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}

int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct perf_evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct perf_evsel *counter;
	int err;

	/*
	 * Synthesize other event details not carried within
	 * the attr event - unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported)
			continue;

		/*
		 * Synthesize unit and scale only if they are defined.
		 */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (counter->own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}
int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_evlist **pevlist)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	struct cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}
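
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event followed by the
 * tracing data itself, padded to a u64 boundary. For example, 30 bytes
 * of tracing data yield aligned_size = 32 and padding = 2, and the
 * aligned size is what gets returned to the caller.
 */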
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed by the
	 * data contents. Since the fd is a pipe, we cannot seek back
	 * to store the size of the data once we know it. Instead we:
	 *
	 * - write the tracing data to a temp file
	 * - get the data size and write it to the pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in the temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
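
/*
 * Counterpart of perf_event__synthesize_tracing_data(): consume the
 * tracing data that follows the event in the stream, skip (and, when
 * repiping, forward) the u64-alignment padding, and cross-check that
 * data plus padding match the size announced in the event.
 */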
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event), SEEK_SET);

	size_read = trace_report(fd, &session->tevent, session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file\n", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		/* write() legitimately returns 0 when padding is 0 */
		if (retw != padding) {
			pr_err("%s: repiping tracing data padding\n", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch\n", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
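
/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for one dso. Only dsos
 * with samples (pos->hit) are emitted; the filename trailing the fixed
 * part is padded to NAME_ALIGN and accounted for in header.size.
 */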
int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}
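
/*
 * Process a PERF_RECORD_HEADER_BUILD_ID event by delegating to
 * __event_process_build_id(), which records the build id against the
 * dso named by the filename carried in the event.
 */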
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
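
/*
 * Sketch of how a pipe-mode reader is expected to wire the process
 * handlers above into its tool callbacks (illustrative only; the real
 * dispatch lives in the session/tool code, not here):
 *
 *	struct perf_tool tool = {
 *		.attr	      = perf_event__process_attr,
 *		.event_update = perf_event__process_event_update,
 *		.tracing_data = perf_event__process_tracing_data,
 *		.build_id     = perf_event__process_build_id,
 *	};
 */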