evsel.c

  1. /*
  2. * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  3. *
  4. * Parts came from builtin-{top,stat,record}.c, see those files for further
  5. * copyright notes.
  6. *
  7. * Released under the GPL v2. (and only v2, not any later version)
  8. */
  9. #include <byteswap.h>
  10. #include <linux/bitops.h>
  11. #include <api/fs/debugfs.h>
  12. #include <traceevent/event-parse.h>
  13. #include <linux/hw_breakpoint.h>
  14. #include <linux/perf_event.h>
  15. #include <sys/resource.h>
  16. #include "asm/bug.h"
  17. #include "callchain.h"
  18. #include "cgroup.h"
  19. #include "evsel.h"
  20. #include "evlist.h"
  21. #include "util.h"
  22. #include "cpumap.h"
  23. #include "thread_map.h"
  24. #include "target.h"
  25. #include "perf_regs.h"
  26. #include "debug.h"
  27. #include "trace-event.h"
  28. #include "stat.h"
  29. static struct {
  30. bool sample_id_all;
  31. bool exclude_guest;
  32. bool mmap2;
  33. bool cloexec;
  34. bool clockid;
  35. bool clockid_wrong;
  36. } perf_missing_features;
  37. static clockid_t clockid;
  38. static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
  39. {
  40. return 0;
  41. }
  42. static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
  43. {
  44. }
  45. static struct {
  46. size_t size;
  47. int (*init)(struct perf_evsel *evsel);
  48. void (*fini)(struct perf_evsel *evsel);
  49. } perf_evsel__object = {
  50. .size = sizeof(struct perf_evsel),
  51. .init = perf_evsel__no_extra_init,
  52. .fini = perf_evsel__no_extra_fini,
  53. };
  54. int perf_evsel__object_config(size_t object_size,
  55. int (*init)(struct perf_evsel *evsel),
  56. void (*fini)(struct perf_evsel *evsel))
  57. {
  58. if (object_size == 0)
  59. goto set_methods;
  60. if (perf_evsel__object.size > object_size)
  61. return -EINVAL;
  62. perf_evsel__object.size = object_size;
  63. set_methods:
  64. if (init != NULL)
  65. perf_evsel__object.init = init;
  66. if (fini != NULL)
  67. perf_evsel__object.fini = fini;
  68. return 0;
  69. }
  70. #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
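/*
 * Note: FD() dereferences the int slot stored at (cpu, thread) in the
 * evsel's fd xyarray, so it works as an lvalue as well as an rvalue,
 * e.g. FD(evsel, cpu, thread) = -1 marks a slot as not yet opened.
 */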
  71. int __perf_evsel__sample_size(u64 sample_type)
  72. {
  73. u64 mask = sample_type & PERF_SAMPLE_MASK;
  74. int size = 0;
  75. int i;
  76. for (i = 0; i < 64; i++) {
  77. if (mask & (1ULL << i))
  78. size++;
  79. }
  80. size *= sizeof(u64);
  81. return size;
  82. }
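/*
 * For example, sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME sets three bits of PERF_SAMPLE_MASK, so
 * __perf_evsel__sample_size() returns 3 * sizeof(u64) = 24 bytes for
 * the fixed-size part of the sample body.
 */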
  83. /**
  84. * __perf_evsel__calc_id_pos - calculate id_pos.
  85. * @sample_type: sample type
  86. *
  87. * This function returns the position of the event id (PERF_SAMPLE_ID or
  88. * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
  89. * sample_event.
  90. */
  91. static int __perf_evsel__calc_id_pos(u64 sample_type)
  92. {
  93. int idx = 0;
  94. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  95. return 0;
  96. if (!(sample_type & PERF_SAMPLE_ID))
  97. return -1;
  98. if (sample_type & PERF_SAMPLE_IP)
  99. idx += 1;
  100. if (sample_type & PERF_SAMPLE_TID)
  101. idx += 1;
  102. if (sample_type & PERF_SAMPLE_TIME)
  103. idx += 1;
  104. if (sample_type & PERF_SAMPLE_ADDR)
  105. idx += 1;
  106. return idx;
  107. }
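/*
 * For example, with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID (and no PERF_SAMPLE_IDENTIFIER),
 * IP, TID and TIME precede the id, so id_pos is 3: the event id is the
 * fourth u64 in the sample body.
 */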
  108. /**
  109. * __perf_evsel__calc_is_pos - calculate is_pos.
  110. * @sample_type: sample type
  111. *
  112. * This function returns the position (counting backwards) of the event id
  113. * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
  114. * sample_id_all is used there is an id sample appended to non-sample events.
  115. */
  116. static int __perf_evsel__calc_is_pos(u64 sample_type)
  117. {
  118. int idx = 1;
  119. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  120. return 1;
  121. if (!(sample_type & PERF_SAMPLE_ID))
  122. return -1;
  123. if (sample_type & PERF_SAMPLE_CPU)
  124. idx += 1;
  125. if (sample_type & PERF_SAMPLE_STREAM_ID)
  126. idx += 1;
  127. return idx;
  128. }
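/*
 * For example, with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID | PERF_SAMPLE_CPU, the id block appended to non-sample
 * events is laid out TID, TIME, ID, CPU, so counting backwards from the
 * end of the event the id is the second u64 and is_pos is 2.
 */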
  129. void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
  130. {
  131. evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
  132. evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
  133. }
  134. void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
  135. enum perf_event_sample_format bit)
  136. {
  137. if (!(evsel->attr.sample_type & bit)) {
  138. evsel->attr.sample_type |= bit;
  139. evsel->sample_size += sizeof(u64);
  140. perf_evsel__calc_id_pos(evsel);
  141. }
  142. }
  143. void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
  144. enum perf_event_sample_format bit)
  145. {
  146. if (evsel->attr.sample_type & bit) {
  147. evsel->attr.sample_type &= ~bit;
  148. evsel->sample_size -= sizeof(u64);
  149. perf_evsel__calc_id_pos(evsel);
  150. }
  151. }
  152. void perf_evsel__set_sample_id(struct perf_evsel *evsel,
  153. bool can_sample_identifier)
  154. {
  155. if (can_sample_identifier) {
  156. perf_evsel__reset_sample_bit(evsel, ID);
  157. perf_evsel__set_sample_bit(evsel, IDENTIFIER);
  158. } else {
  159. perf_evsel__set_sample_bit(evsel, ID);
  160. }
  161. evsel->attr.read_format |= PERF_FORMAT_ID;
  162. }
  163. void perf_evsel__init(struct perf_evsel *evsel,
  164. struct perf_event_attr *attr, int idx)
  165. {
  166. evsel->idx = idx;
  167. evsel->tracking = !idx;
  168. evsel->attr = *attr;
  169. evsel->leader = evsel;
  170. evsel->unit = "";
  171. evsel->scale = 1.0;
  172. evsel->evlist = NULL;
  173. INIT_LIST_HEAD(&evsel->node);
  174. INIT_LIST_HEAD(&evsel->config_terms);
  175. perf_evsel__object.init(evsel);
  176. evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
  177. perf_evsel__calc_id_pos(evsel);
  178. evsel->cmdline_group_boundary = false;
  179. }
  180. struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
  181. {
  182. struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
  183. if (evsel != NULL)
  184. perf_evsel__init(evsel, attr, idx);
  185. return evsel;
  186. }
  187. struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
  188. {
  189. struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
  190. if (evsel != NULL) {
  191. struct perf_event_attr attr = {
  192. .type = PERF_TYPE_TRACEPOINT,
  193. .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
  194. PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
  195. };
  196. if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
  197. goto out_free;
  198. evsel->tp_format = trace_event__tp_format(sys, name);
  199. if (evsel->tp_format == NULL)
  200. goto out_free;
  201. event_attr_init(&attr);
  202. attr.config = evsel->tp_format->id;
  203. attr.sample_period = 1;
  204. perf_evsel__init(evsel, &attr, idx);
  205. }
  206. return evsel;
  207. out_free:
  208. zfree(&evsel->name);
  209. free(evsel);
  210. return NULL;
  211. }
  212. const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
  213. "cycles",
  214. "instructions",
  215. "cache-references",
  216. "cache-misses",
  217. "branches",
  218. "branch-misses",
  219. "bus-cycles",
  220. "stalled-cycles-frontend",
  221. "stalled-cycles-backend",
  222. "ref-cycles",
  223. };
  224. static const char *__perf_evsel__hw_name(u64 config)
  225. {
  226. if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
  227. return perf_evsel__hw_names[config];
  228. return "unknown-hardware";
  229. }
  230. static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
  231. {
  232. int colon = 0, r = 0;
  233. struct perf_event_attr *attr = &evsel->attr;
  234. bool exclude_guest_default = false;
  235. #define MOD_PRINT(context, mod) do { \
  236. if (!attr->exclude_##context) { \
  237. if (!colon) colon = ++r; \
  238. r += scnprintf(bf + r, size - r, "%c", mod); \
  239. } } while(0)
  240. if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
  241. MOD_PRINT(kernel, 'k');
  242. MOD_PRINT(user, 'u');
  243. MOD_PRINT(hv, 'h');
  244. exclude_guest_default = true;
  245. }
  246. if (attr->precise_ip) {
  247. if (!colon)
  248. colon = ++r;
  249. r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
  250. exclude_guest_default = true;
  251. }
  252. if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
  253. MOD_PRINT(host, 'H');
  254. MOD_PRINT(guest, 'G');
  255. }
  256. #undef MOD_PRINT
  257. if (colon)
  258. bf[colon - 1] = ':';
  259. return r;
  260. }
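/*
 * In perf_evsel__add_modifiers(), the first modifier printed bumps r to
 * leave one byte free (colon = ++r); after all modifiers are emitted,
 * bf[colon - 1] = ':' patches the separator into that gap, producing
 * suffixes such as ":u" or ":k" after the event name.
 */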
  261. static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
  262. {
  263. int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
  264. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  265. }
  266. const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
  267. "cpu-clock",
  268. "task-clock",
  269. "page-faults",
  270. "context-switches",
  271. "cpu-migrations",
  272. "minor-faults",
  273. "major-faults",
  274. "alignment-faults",
  275. "emulation-faults",
  276. "dummy",
  277. };
  278. static const char *__perf_evsel__sw_name(u64 config)
  279. {
  280. if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
  281. return perf_evsel__sw_names[config];
  282. return "unknown-software";
  283. }
  284. static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
  285. {
  286. int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
  287. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  288. }
  289. static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
  290. {
  291. int r;
  292. r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
  293. if (type & HW_BREAKPOINT_R)
  294. r += scnprintf(bf + r, size - r, "r");
  295. if (type & HW_BREAKPOINT_W)
  296. r += scnprintf(bf + r, size - r, "w");
  297. if (type & HW_BREAKPOINT_X)
  298. r += scnprintf(bf + r, size - r, "x");
  299. return r;
  300. }
  301. static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
  302. {
  303. struct perf_event_attr *attr = &evsel->attr;
  304. int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
  305. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  306. }
  307. const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
  308. [PERF_EVSEL__MAX_ALIASES] = {
  309. { "L1-dcache", "l1-d", "l1d", "L1-data", },
  310. { "L1-icache", "l1-i", "l1i", "L1-instruction", },
  311. { "LLC", "L2", },
  312. { "dTLB", "d-tlb", "Data-TLB", },
  313. { "iTLB", "i-tlb", "Instruction-TLB", },
  314. { "branch", "branches", "bpu", "btb", "bpc", },
  315. { "node", },
  316. };
  317. const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
  318. [PERF_EVSEL__MAX_ALIASES] = {
  319. { "load", "loads", "read", },
  320. { "store", "stores", "write", },
  321. { "prefetch", "prefetches", "speculative-read", "speculative-load", },
  322. };
  323. const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
  324. [PERF_EVSEL__MAX_ALIASES] = {
  325. { "refs", "Reference", "ops", "access", },
  326. { "misses", "miss", },
  327. };
  328. #define C(x) PERF_COUNT_HW_CACHE_##x
  329. #define CACHE_READ (1 << C(OP_READ))
  330. #define CACHE_WRITE (1 << C(OP_WRITE))
  331. #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
  332. #define COP(x) (1 << x)
  333. /*
  334. * cache operation stat
  335. * L1I : Read and prefetch only
  336. * ITLB and BPU : Read-only
  337. */
  338. static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
  339. [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  340. [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
  341. [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  342. [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  343. [C(ITLB)] = (CACHE_READ),
  344. [C(BPU)] = (CACHE_READ),
  345. [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  346. };
  347. bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
  348. {
  349. if (perf_evsel__hw_cache_stat[type] & COP(op))
  350. return true; /* valid */
  351. else
  352. return false; /* invalid */
  353. }
  354. int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
  355. char *bf, size_t size)
  356. {
  357. if (result) {
  358. return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
  359. perf_evsel__hw_cache_op[op][0],
  360. perf_evsel__hw_cache_result[result][0]);
  361. }
  362. return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
  363. perf_evsel__hw_cache_op[op][1]);
  364. }
  365. static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
  366. {
  367. u8 op, result, type = (config >> 0) & 0xff;
  368. const char *err = "unknown-ext-hardware-cache-type";
  369. if (type >= PERF_COUNT_HW_CACHE_MAX)
  370. goto out_err;
  371. op = (config >> 8) & 0xff;
  372. err = "unknown-ext-hardware-cache-op";
  373. if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
  374. goto out_err;
  375. result = (config >> 16) & 0xff;
  376. err = "unknown-ext-hardware-cache-result";
  377. if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  378. goto out_err;
  379. err = "invalid-cache";
  380. if (!perf_evsel__is_cache_op_valid(type, op))
  381. goto out_err;
  382. return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
  383. out_err:
  384. return scnprintf(bf, size, "%s", err);
  385. }
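/*
 * The cache event config packs the type in bits 0-7, the op in bits
 * 8-15 and the result in bits 16-23. For example, config =
 * PERF_COUNT_HW_CACHE_L1D | (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) is rendered as
 * "L1-dcache-load-misses".
 */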
  386. static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
  387. {
  388. int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
  389. return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
  390. }
  391. static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
  392. {
  393. int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
  394. return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
  395. }
  396. const char *perf_evsel__name(struct perf_evsel *evsel)
  397. {
  398. char bf[128];
  399. if (evsel->name)
  400. return evsel->name;
  401. switch (evsel->attr.type) {
  402. case PERF_TYPE_RAW:
  403. perf_evsel__raw_name(evsel, bf, sizeof(bf));
  404. break;
  405. case PERF_TYPE_HARDWARE:
  406. perf_evsel__hw_name(evsel, bf, sizeof(bf));
  407. break;
  408. case PERF_TYPE_HW_CACHE:
  409. perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
  410. break;
  411. case PERF_TYPE_SOFTWARE:
  412. perf_evsel__sw_name(evsel, bf, sizeof(bf));
  413. break;
  414. case PERF_TYPE_TRACEPOINT:
  415. scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
  416. break;
  417. case PERF_TYPE_BREAKPOINT:
  418. perf_evsel__bp_name(evsel, bf, sizeof(bf));
  419. break;
  420. default:
  421. scnprintf(bf, sizeof(bf), "unknown attr type: %d",
  422. evsel->attr.type);
  423. break;
  424. }
  425. evsel->name = strdup(bf);
  426. return evsel->name ?: "unknown";
  427. }
  428. const char *perf_evsel__group_name(struct perf_evsel *evsel)
  429. {
  430. return evsel->group_name ?: "anon group";
  431. }
  432. int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
  433. {
  434. int ret;
  435. struct perf_evsel *pos;
  436. const char *group_name = perf_evsel__group_name(evsel);
  437. ret = scnprintf(buf, size, "%s", group_name);
  438. ret += scnprintf(buf + ret, size - ret, " { %s",
  439. perf_evsel__name(evsel));
  440. for_each_group_member(pos, evsel)
  441. ret += scnprintf(buf + ret, size - ret, ", %s",
  442. perf_evsel__name(pos));
  443. ret += scnprintf(buf + ret, size - ret, " }");
  444. return ret;
  445. }
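/*
 * perf_evsel__group_desc() formats a leader and its members as, for
 * example, "anon group { cycles, instructions }" when no explicit
 * group name was given.
 */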
  446. static void
  447. perf_evsel__config_callgraph(struct perf_evsel *evsel,
  448. struct record_opts *opts,
  449. struct callchain_param *param)
  450. {
  451. bool function = perf_evsel__is_function_event(evsel);
  452. struct perf_event_attr *attr = &evsel->attr;
  453. perf_evsel__set_sample_bit(evsel, CALLCHAIN);
  454. if (param->record_mode == CALLCHAIN_LBR) {
  455. if (!opts->branch_stack) {
  456. if (attr->exclude_user) {
  457. pr_warning("LBR callstack option is only available "
  458. "to get user callchain information. "
  459. "Falling back to framepointers.\n");
  460. } else {
  461. perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
  462. attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
  463. PERF_SAMPLE_BRANCH_CALL_STACK;
  464. }
  465. } else
  466. pr_warning("Cannot use LBR callstack with branch stack. "
  467. "Falling back to framepointers.\n");
  468. }
  469. if (param->record_mode == CALLCHAIN_DWARF) {
  470. if (!function) {
  471. perf_evsel__set_sample_bit(evsel, REGS_USER);
  472. perf_evsel__set_sample_bit(evsel, STACK_USER);
  473. attr->sample_regs_user = PERF_REGS_MASK;
  474. attr->sample_stack_user = param->dump_size;
  475. attr->exclude_callchain_user = 1;
  476. } else {
  477. pr_info("Cannot use DWARF unwind for function trace event,"
  478. " falling back to framepointers.\n");
  479. }
  480. }
  481. if (function) {
  482. pr_info("Disabling user space callchains for function trace event.\n");
  483. attr->exclude_callchain_user = 1;
  484. }
  485. }
  486. static void
  487. perf_evsel__reset_callgraph(struct perf_evsel *evsel,
  488. struct callchain_param *param)
  489. {
  490. struct perf_event_attr *attr = &evsel->attr;
  491. perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
  492. if (param->record_mode == CALLCHAIN_LBR) {
  493. perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
  494. attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
  495. PERF_SAMPLE_BRANCH_CALL_STACK);
  496. }
  497. if (param->record_mode == CALLCHAIN_DWARF) {
  498. perf_evsel__reset_sample_bit(evsel, REGS_USER);
  499. perf_evsel__reset_sample_bit(evsel, STACK_USER);
  500. }
  501. }
  502. static void apply_config_terms(struct perf_evsel *evsel,
  503. struct record_opts *opts)
  504. {
  505. struct perf_evsel_config_term *term;
  506. struct list_head *config_terms = &evsel->config_terms;
  507. struct perf_event_attr *attr = &evsel->attr;
  508. struct callchain_param param;
  509. u32 dump_size = 0;
  510. char *callgraph_buf = NULL;
  511. /* callgraph default */
  512. param.record_mode = callchain_param.record_mode;
  513. list_for_each_entry(term, config_terms, list) {
  514. switch (term->type) {
  515. case PERF_EVSEL__CONFIG_TERM_PERIOD:
  516. attr->sample_period = term->val.period;
  517. attr->freq = 0;
  518. break;
  519. case PERF_EVSEL__CONFIG_TERM_FREQ:
  520. attr->sample_freq = term->val.freq;
  521. attr->freq = 1;
  522. break;
  523. case PERF_EVSEL__CONFIG_TERM_TIME:
  524. if (term->val.time)
  525. perf_evsel__set_sample_bit(evsel, TIME);
  526. else
  527. perf_evsel__reset_sample_bit(evsel, TIME);
  528. break;
  529. case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
  530. callgraph_buf = term->val.callgraph;
  531. break;
  532. case PERF_EVSEL__CONFIG_TERM_STACK_USER:
  533. dump_size = term->val.stack_user;
  534. break;
  535. default:
  536. break;
  537. }
  538. }
  539. /* User explicitly set per-event callgraph, clear the old setting and reset. */
  540. if ((callgraph_buf != NULL) || (dump_size > 0)) {
  541. /* parse callgraph parameters */
  542. if (callgraph_buf != NULL) {
  543. if (!strcmp(callgraph_buf, "no")) {
  544. param.enabled = false;
  545. param.record_mode = CALLCHAIN_NONE;
  546. } else {
  547. param.enabled = true;
  548. if (parse_callchain_record(callgraph_buf, &param)) {
  549. pr_err("per-event callgraph setting for %s failed. "
  550. "Apply callgraph global setting for it\n",
  551. evsel->name);
  552. return;
  553. }
  554. }
  555. }
  556. if (dump_size > 0) {
  557. dump_size = round_up(dump_size, sizeof(u64));
  558. param.dump_size = dump_size;
  559. }
  560. /* If global callgraph set, clear it */
  561. if (callchain_param.enabled)
  562. perf_evsel__reset_callgraph(evsel, &callchain_param);
  563. /* set perf-event callgraph */
  564. if (param.enabled)
  565. perf_evsel__config_callgraph(evsel, opts, &param);
  566. }
  567. }
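/*
 * For example, a per-event term such as period=10000 arrives here as a
 * PERF_EVSEL__CONFIG_TERM_PERIOD entry and switches the event to a
 * fixed sample_period (clearing attr->freq), while a per-event
 * call-graph or stack-size term replaces the global callchain setup
 * via perf_evsel__reset_callgraph()/perf_evsel__config_callgraph().
 */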
  568. /*
  569. * The enable_on_exec/disabled value strategy:
  570. *
  571. * 1) For any type of traced program:
  572. * - all independent events and group leaders are disabled
  573. * - all group members are enabled
  574. *
  575. * Group members are ruled by group leaders. They need to
  576. * be enabled, because the group scheduling relies on that.
  577. *
  578. * 2) For traced programs executed by perf:
  579. * - all independent events and group leaders have
  580. * enable_on_exec set
  581. * - we don't specifically enable or disable any event during
  582. * the record command
  583. *
  584. * Independent events and group leaders are initially disabled
  585. * and get enabled by exec. Group members are ruled by group
  586. * leaders as stated in 1).
  587. *
  588. * 3) For traced programs attached by perf (pid/tid):
  589. * - we specifically enable or disable all events during
  590. * the record command
  591. *
  592. * When attaching events to already running traced programs we
  593. * enable/disable events specifically, as there's no
  594. * initial traced exec call.
  595. */
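/*
 * Concretely, for "perf record ./workload" (no initial delay) the group
 * leaders below get attr->disabled = 1 and attr->enable_on_exec = 1, so
 * counting starts only when the workload execs, while group members
 * stay enabled and simply follow their leader.
 */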
  596. void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
  597. {
  598. struct perf_evsel *leader = evsel->leader;
  599. struct perf_event_attr *attr = &evsel->attr;
  600. int track = evsel->tracking;
  601. bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
  602. attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
  603. attr->inherit = !opts->no_inherit;
  604. perf_evsel__set_sample_bit(evsel, IP);
  605. perf_evsel__set_sample_bit(evsel, TID);
  606. if (evsel->sample_read) {
  607. perf_evsel__set_sample_bit(evsel, READ);
  608. /*
  609. * We need ID even in the case of a single event, because
  610. * PERF_SAMPLE_READ processes ID specific data.
  611. */
  612. perf_evsel__set_sample_id(evsel, false);
  613. /*
  614. * Apply the group format only if we belong to a group
  615. * with more than one member.
  616. */
  617. if (leader->nr_members > 1) {
  618. attr->read_format |= PERF_FORMAT_GROUP;
  619. attr->inherit = 0;
  620. }
  621. }
  622. /*
  623. * We give some events a default sample interval, but keep
  624. * it a weak assumption overridable by the user.
  625. */
  626. if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
  627. opts->user_interval != ULLONG_MAX)) {
  628. if (opts->freq) {
  629. perf_evsel__set_sample_bit(evsel, PERIOD);
  630. attr->freq = 1;
  631. attr->sample_freq = opts->freq;
  632. } else {
  633. attr->sample_period = opts->default_interval;
  634. }
  635. }
  636. /*
  637. * Disable sampling for all group members other than the
  638. * leader in case the leader 'leads' the sampling.
  639. */
  640. if ((leader != evsel) && leader->sample_read) {
  641. attr->sample_freq = 0;
  642. attr->sample_period = 0;
  643. }
  644. if (opts->no_samples)
  645. attr->sample_freq = 0;
  646. if (opts->inherit_stat)
  647. attr->inherit_stat = 1;
  648. if (opts->sample_address) {
  649. perf_evsel__set_sample_bit(evsel, ADDR);
  650. attr->mmap_data = track;
  651. }
  652. /*
  653. * We don't allow user space callchains for the function trace
  654. * event, due to issues with page faults while tracing the page
  655. * fault handler and its overall tricky nature.
  656. */
  657. if (perf_evsel__is_function_event(evsel))
  658. evsel->attr.exclude_callchain_user = 1;
  659. if (callchain_param.enabled && !evsel->no_aux_samples)
  660. perf_evsel__config_callgraph(evsel, opts, &callchain_param);
  661. if (opts->sample_intr_regs) {
  662. attr->sample_regs_intr = opts->sample_intr_regs;
  663. perf_evsel__set_sample_bit(evsel, REGS_INTR);
  664. }
  665. if (target__has_cpu(&opts->target))
  666. perf_evsel__set_sample_bit(evsel, CPU);
  667. if (opts->period)
  668. perf_evsel__set_sample_bit(evsel, PERIOD);
  669. /*
  670. * When the user explicitly disabled time, don't force it here.
  671. */
  672. if (opts->sample_time &&
  673. (!perf_missing_features.sample_id_all &&
  674. (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
  675. opts->sample_time_set)))
  676. perf_evsel__set_sample_bit(evsel, TIME);
  677. if (opts->raw_samples && !evsel->no_aux_samples) {
  678. perf_evsel__set_sample_bit(evsel, TIME);
  679. perf_evsel__set_sample_bit(evsel, RAW);
  680. perf_evsel__set_sample_bit(evsel, CPU);
  681. }
  682. if (opts->sample_address)
  683. perf_evsel__set_sample_bit(evsel, DATA_SRC);
  684. if (opts->no_buffering) {
  685. attr->watermark = 0;
  686. attr->wakeup_events = 1;
  687. }
  688. if (opts->branch_stack && !evsel->no_aux_samples) {
  689. perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
  690. attr->branch_sample_type = opts->branch_stack;
  691. }
  692. if (opts->sample_weight)
  693. perf_evsel__set_sample_bit(evsel, WEIGHT);
  694. attr->task = track;
  695. attr->mmap = track;
  696. attr->mmap2 = track && !perf_missing_features.mmap2;
  697. attr->comm = track;
  698. if (opts->record_switch_events)
  699. attr->context_switch = track;
  700. if (opts->sample_transaction)
  701. perf_evsel__set_sample_bit(evsel, TRANSACTION);
  702. if (opts->running_time) {
  703. evsel->attr.read_format |=
  704. PERF_FORMAT_TOTAL_TIME_ENABLED |
  705. PERF_FORMAT_TOTAL_TIME_RUNNING;
  706. }
  707. /*
  708. * XXX see the function comment above
  709. *
  710. * Disabling only independent events or group leaders,
  711. * keeping group members enabled.
  712. */
  713. if (perf_evsel__is_group_leader(evsel))
  714. attr->disabled = 1;
  715. /*
  716. * Setting enable_on_exec for independent events and
  717. * group leaders for traced programs executed by perf.
  718. */
  719. if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
  720. !opts->initial_delay)
  721. attr->enable_on_exec = 1;
  722. if (evsel->immediate) {
  723. attr->disabled = 0;
  724. attr->enable_on_exec = 0;
  725. }
  726. clockid = opts->clockid;
  727. if (opts->use_clockid) {
  728. attr->use_clockid = 1;
  729. attr->clockid = opts->clockid;
  730. }
  731. /*
  732. * Apply event-specific term settings; they override
  733. * any global configuration.
  734. */
  735. apply_config_terms(evsel, opts);
  736. }
  737. static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
  738. {
  739. int cpu, thread;
  740. if (evsel->system_wide)
  741. nthreads = 1;
  742. evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
  743. if (evsel->fd) {
  744. for (cpu = 0; cpu < ncpus; cpu++) {
  745. for (thread = 0; thread < nthreads; thread++) {
  746. FD(evsel, cpu, thread) = -1;
  747. }
  748. }
  749. }
  750. return evsel->fd != NULL ? 0 : -ENOMEM;
  751. }
  752. static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
  753. int ioc, void *arg)
  754. {
  755. int cpu, thread;
  756. if (evsel->system_wide)
  757. nthreads = 1;
  758. for (cpu = 0; cpu < ncpus; cpu++) {
  759. for (thread = 0; thread < nthreads; thread++) {
  760. int fd = FD(evsel, cpu, thread),
  761. err = ioctl(fd, ioc, arg);
  762. if (err)
  763. return err;
  764. }
  765. }
  766. return 0;
  767. }
  768. int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
  769. const char *filter)
  770. {
  771. return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
  772. PERF_EVENT_IOC_SET_FILTER,
  773. (void *)filter);
  774. }
  775. int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
  776. {
  777. char *new_filter = strdup(filter);
  778. if (new_filter != NULL) {
  779. free(evsel->filter);
  780. evsel->filter = new_filter;
  781. return 0;
  782. }
  783. return -1;
  784. }
  785. int perf_evsel__append_filter(struct perf_evsel *evsel,
  786. const char *op, const char *filter)
  787. {
  788. char *new_filter;
  789. if (evsel->filter == NULL)
  790. return perf_evsel__set_filter(evsel, filter);
  791. if (asprintf(&new_filter,"(%s) %s (%s)", evsel->filter, op, filter) > 0) {
  792. free(evsel->filter);
  793. evsel->filter = new_filter;
  794. return 0;
  795. }
  796. return -1;
  797. }
  798. int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
  799. {
  800. return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
  801. PERF_EVENT_IOC_ENABLE,
  802. 0);
  803. }
  804. int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
  805. {
  806. if (ncpus == 0 || nthreads == 0)
  807. return 0;
  808. if (evsel->system_wide)
  809. nthreads = 1;
  810. evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
  811. if (evsel->sample_id == NULL)
  812. return -ENOMEM;
  813. evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
  814. if (evsel->id == NULL) {
  815. xyarray__delete(evsel->sample_id);
  816. evsel->sample_id = NULL;
  817. return -ENOMEM;
  818. }
  819. return 0;
  820. }
  821. static void perf_evsel__free_fd(struct perf_evsel *evsel)
  822. {
  823. xyarray__delete(evsel->fd);
  824. evsel->fd = NULL;
  825. }
  826. static void perf_evsel__free_id(struct perf_evsel *evsel)
  827. {
  828. xyarray__delete(evsel->sample_id);
  829. evsel->sample_id = NULL;
  830. zfree(&evsel->id);
  831. }
  832. static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
  833. {
  834. struct perf_evsel_config_term *term, *h;
  835. list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
  836. list_del(&term->list);
  837. free(term);
  838. }
  839. }
  840. void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
  841. {
  842. int cpu, thread;
  843. if (evsel->system_wide)
  844. nthreads = 1;
  845. for (cpu = 0; cpu < ncpus; cpu++)
  846. for (thread = 0; thread < nthreads; ++thread) {
  847. close(FD(evsel, cpu, thread));
  848. FD(evsel, cpu, thread) = -1;
  849. }
  850. }
  851. void perf_evsel__exit(struct perf_evsel *evsel)
  852. {
  853. assert(list_empty(&evsel->node));
  854. assert(evsel->evlist == NULL);
  855. perf_evsel__free_fd(evsel);
  856. perf_evsel__free_id(evsel);
  857. perf_evsel__free_config_terms(evsel);
  858. close_cgroup(evsel->cgrp);
  859. cpu_map__put(evsel->cpus);
  860. cpu_map__put(evsel->own_cpus);
  861. thread_map__put(evsel->threads);
  862. zfree(&evsel->group_name);
  863. zfree(&evsel->name);
  864. perf_evsel__object.fini(evsel);
  865. }
  866. void perf_evsel__delete(struct perf_evsel *evsel)
  867. {
  868. perf_evsel__exit(evsel);
  869. free(evsel);
  870. }
  871. void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
  872. struct perf_counts_values *count)
  873. {
  874. struct perf_counts_values tmp;
  875. if (!evsel->prev_raw_counts)
  876. return;
  877. if (cpu == -1) {
  878. tmp = evsel->prev_raw_counts->aggr;
  879. evsel->prev_raw_counts->aggr = *count;
  880. } else {
  881. tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
  882. *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
  883. }
  884. count->val = count->val - tmp.val;
  885. count->ena = count->ena - tmp.ena;
  886. count->run = count->run - tmp.run;
  887. }
  888. void perf_counts_values__scale(struct perf_counts_values *count,
  889. bool scale, s8 *pscaled)
  890. {
  891. s8 scaled = 0;
  892. if (scale) {
  893. if (count->run == 0) {
  894. scaled = -1;
  895. count->val = 0;
  896. } else if (count->run < count->ena) {
  897. scaled = 1;
  898. count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
  899. }
  900. } else
  901. count->ena = count->run = 0;
  902. if (pscaled)
  903. *pscaled = scaled;
  904. }
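/*
 * Worked example: with val = 1000, ena = 100 and run = 50 (the event
 * was scheduled in for half of the enabled time), scaling yields
 * val = 2000 (1000 * 100 / 50, rounded) and *pscaled = 1; run == 0
 * instead reports val = 0 with *pscaled = -1.
 */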
  905. int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
  906. struct perf_counts_values *count)
  907. {
  908. memset(count, 0, sizeof(*count));
  909. if (FD(evsel, cpu, thread) < 0)
  910. return -EINVAL;
  911. if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
  912. return -errno;
  913. return 0;
  914. }
  915. int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
  916. int cpu, int thread, bool scale)
  917. {
  918. struct perf_counts_values count;
  919. size_t nv = scale ? 3 : 1;
  920. if (FD(evsel, cpu, thread) < 0)
  921. return -EINVAL;
  922. if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
  923. return -ENOMEM;
  924. if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
  925. return -errno;
  926. perf_evsel__compute_deltas(evsel, cpu, thread, &count);
  927. perf_counts_values__scale(&count, scale, NULL);
  928. *perf_counts(evsel->counts, cpu, thread) = count;
  929. return 0;
  930. }
  931. static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
  932. {
  933. struct perf_evsel *leader = evsel->leader;
  934. int fd;
  935. if (perf_evsel__is_group_leader(evsel))
  936. return -1;
  937. /*
  938. * Leader must be already processed/open,
  939. * if not it's a bug.
  940. */
  941. BUG_ON(!leader->fd);
  942. fd = FD(leader, cpu, thread);
  943. BUG_ON(fd == -1);
  944. return fd;
  945. }
  946. struct bit_names {
  947. int bit;
  948. const char *name;
  949. };
  950. static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
  951. {
  952. bool first_bit = true;
  953. int i = 0;
  954. do {
  955. if (value & bits[i].bit) {
  956. buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
  957. first_bit = false;
  958. }
  959. } while (bits[++i].name != NULL);
  960. }
  961. static void __p_sample_type(char *buf, size_t size, u64 value)
  962. {
  963. #define bit_name(n) { PERF_SAMPLE_##n, #n }
  964. struct bit_names bits[] = {
  965. bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
  966. bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
  967. bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
  968. bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
  969. bit_name(IDENTIFIER), bit_name(REGS_INTR),
  970. { .name = NULL, }
  971. };
  972. #undef bit_name
  973. __p_bits(buf, size, value, bits);
  974. }
  975. static void __p_read_format(char *buf, size_t size, u64 value)
  976. {
  977. #define bit_name(n) { PERF_FORMAT_##n, #n }
  978. struct bit_names bits[] = {
  979. bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
  980. bit_name(ID), bit_name(GROUP),
  981. { .name = NULL, }
  982. };
  983. #undef bit_name
  984. __p_bits(buf, size, value, bits);
  985. }
  986. #define BUF_SIZE 1024
  987. #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
  988. #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
  989. #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
  990. #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
  991. #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
  992. #define PRINT_ATTRn(_n, _f, _p) \
  993. do { \
  994. if (attr->_f) { \
  995. _p(attr->_f); \
  996. ret += attr__fprintf(fp, _n, buf, priv);\
  997. } \
  998. } while (0)
  999. #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
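/*
 * PRINT_ATTRf(type, p_unsigned) expands to: if (attr->type) {
 * p_unsigned(attr->type); ret += attr__fprintf(fp, "type", buf, priv); }
 * i.e. only non-zero attr fields are formatted into buf and printed.
 */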
  1000. int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
  1001. attr__fprintf_f attr__fprintf, void *priv)
  1002. {
  1003. char buf[BUF_SIZE];
  1004. int ret = 0;
  1005. PRINT_ATTRf(type, p_unsigned);
  1006. PRINT_ATTRf(size, p_unsigned);
  1007. PRINT_ATTRf(config, p_hex);
  1008. PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
  1009. PRINT_ATTRf(sample_type, p_sample_type);
  1010. PRINT_ATTRf(read_format, p_read_format);
  1011. PRINT_ATTRf(disabled, p_unsigned);
  1012. PRINT_ATTRf(inherit, p_unsigned);
  1013. PRINT_ATTRf(pinned, p_unsigned);
  1014. PRINT_ATTRf(exclusive, p_unsigned);
  1015. PRINT_ATTRf(exclude_user, p_unsigned);
  1016. PRINT_ATTRf(exclude_kernel, p_unsigned);
  1017. PRINT_ATTRf(exclude_hv, p_unsigned);
  1018. PRINT_ATTRf(exclude_idle, p_unsigned);
  1019. PRINT_ATTRf(mmap, p_unsigned);
  1020. PRINT_ATTRf(comm, p_unsigned);
  1021. PRINT_ATTRf(freq, p_unsigned);
  1022. PRINT_ATTRf(inherit_stat, p_unsigned);
  1023. PRINT_ATTRf(enable_on_exec, p_unsigned);
  1024. PRINT_ATTRf(task, p_unsigned);
  1025. PRINT_ATTRf(watermark, p_unsigned);
  1026. PRINT_ATTRf(precise_ip, p_unsigned);
  1027. PRINT_ATTRf(mmap_data, p_unsigned);
  1028. PRINT_ATTRf(sample_id_all, p_unsigned);
  1029. PRINT_ATTRf(exclude_host, p_unsigned);
  1030. PRINT_ATTRf(exclude_guest, p_unsigned);
  1031. PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
  1032. PRINT_ATTRf(exclude_callchain_user, p_unsigned);
  1033. PRINT_ATTRf(mmap2, p_unsigned);
  1034. PRINT_ATTRf(comm_exec, p_unsigned);
  1035. PRINT_ATTRf(use_clockid, p_unsigned);
  1036. PRINT_ATTRf(context_switch, p_unsigned);
  1037. PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
  1038. PRINT_ATTRf(bp_type, p_unsigned);
  1039. PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
  1040. PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
  1041. PRINT_ATTRf(sample_regs_user, p_hex);
  1042. PRINT_ATTRf(sample_stack_user, p_unsigned);
  1043. PRINT_ATTRf(clockid, p_signed);
  1044. PRINT_ATTRf(sample_regs_intr, p_hex);
  1045. PRINT_ATTRf(aux_watermark, p_unsigned);
  1046. return ret;
  1047. }
  1048. static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
  1049. void *priv __attribute__((unused)))
  1050. {
  1051. return fprintf(fp, " %-32s %s\n", name, val);
  1052. }
  1053. static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
  1054. struct thread_map *threads)
  1055. {
  1056. int cpu, thread, nthreads;
  1057. unsigned long flags = PERF_FLAG_FD_CLOEXEC;
  1058. int pid = -1, err;
  1059. enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
  1060. if (evsel->system_wide)
  1061. nthreads = 1;
  1062. else
  1063. nthreads = threads->nr;
  1064. if (evsel->fd == NULL &&
  1065. perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
  1066. return -ENOMEM;
  1067. if (evsel->cgrp) {
  1068. flags |= PERF_FLAG_PID_CGROUP;
  1069. pid = evsel->cgrp->fd;
  1070. }
  1071. fallback_missing_features:
  1072. if (perf_missing_features.clockid_wrong)
  1073. evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
  1074. if (perf_missing_features.clockid) {
  1075. evsel->attr.use_clockid = 0;
  1076. evsel->attr.clockid = 0;
  1077. }
  1078. if (perf_missing_features.cloexec)
  1079. flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
  1080. if (perf_missing_features.mmap2)
  1081. evsel->attr.mmap2 = 0;
  1082. if (perf_missing_features.exclude_guest)
  1083. evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
  1084. retry_sample_id:
  1085. if (perf_missing_features.sample_id_all)
  1086. evsel->attr.sample_id_all = 0;
  1087. if (verbose >= 2) {
  1088. fprintf(stderr, "%.60s\n", graph_dotted_line);
  1089. fprintf(stderr, "perf_event_attr:\n");
  1090. perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
  1091. fprintf(stderr, "%.60s\n", graph_dotted_line);
  1092. }
  1093. for (cpu = 0; cpu < cpus->nr; cpu++) {
  1094. for (thread = 0; thread < nthreads; thread++) {
  1095. int group_fd;
  1096. if (!evsel->cgrp && !evsel->system_wide)
  1097. pid = thread_map__pid(threads, thread);
  1098. group_fd = get_group_fd(evsel, cpu, thread);
  1099. retry_open:
  1100. pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
  1101. pid, cpus->map[cpu], group_fd, flags);
  1102. FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
  1103. pid,
  1104. cpus->map[cpu],
  1105. group_fd, flags);
  1106. if (FD(evsel, cpu, thread) < 0) {
  1107. err = -errno;
  1108. pr_debug2("sys_perf_event_open failed, error %d\n",
  1109. err);
  1110. goto try_fallback;
  1111. }
  1112. set_rlimit = NO_CHANGE;
  1113. /*
  1114. * If we succeeded but had to kill clockid, fail and
  1115. * have perf_evsel__open_strerror() print us a nice
  1116. * error.
  1117. */
  1118. if (perf_missing_features.clockid ||
  1119. perf_missing_features.clockid_wrong) {
  1120. err = -EINVAL;
  1121. goto out_close;
  1122. }
  1123. }
  1124. }
  1125. return 0;
  1126. try_fallback:
  1127. /*
  1128. * perf stat needs between 5 and 22 fds per CPU. When we run out
  1129. * of them try to increase the limits.
  1130. */
  1131. if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
  1132. struct rlimit l;
  1133. int old_errno = errno;
  1134. if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
  1135. if (set_rlimit == NO_CHANGE)
  1136. l.rlim_cur = l.rlim_max;
  1137. else {
  1138. l.rlim_cur = l.rlim_max + 1000;
  1139. l.rlim_max = l.rlim_cur;
  1140. }
  1141. if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
  1142. set_rlimit++;
  1143. errno = old_errno;
  1144. goto retry_open;
  1145. }
  1146. }
  1147. errno = old_errno;
  1148. }
  1149. if (err != -EINVAL || cpu > 0 || thread > 0)
  1150. goto out_close;
  1151. /*
  1152. * Must probe features in the order they were added to the
  1153. * perf_event_attr interface.
  1154. */
  1155. if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
  1156. perf_missing_features.clockid_wrong = true;
  1157. goto fallback_missing_features;
  1158. } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
  1159. perf_missing_features.clockid = true;
  1160. goto fallback_missing_features;
  1161. } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
  1162. perf_missing_features.cloexec = true;
  1163. goto fallback_missing_features;
  1164. } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
  1165. perf_missing_features.mmap2 = true;
  1166. goto fallback_missing_features;
  1167. } else if (!perf_missing_features.exclude_guest &&
  1168. (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
  1169. perf_missing_features.exclude_guest = true;
  1170. goto fallback_missing_features;
  1171. } else if (!perf_missing_features.sample_id_all) {
  1172. perf_missing_features.sample_id_all = true;
  1173. goto retry_sample_id;
  1174. }
  1175. out_close:
  1176. do {
  1177. while (--thread >= 0) {
  1178. close(FD(evsel, cpu, thread));
  1179. FD(evsel, cpu, thread) = -1;
  1180. }
  1181. thread = nthreads;
  1182. } while (--cpu >= 0);
  1183. return err;
  1184. }
  1185. void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
  1186. {
  1187. if (evsel->fd == NULL)
  1188. return;
  1189. perf_evsel__close_fd(evsel, ncpus, nthreads);
  1190. perf_evsel__free_fd(evsel);
  1191. }
  1192. static struct {
  1193. struct cpu_map map;
  1194. int cpus[1];
  1195. } empty_cpu_map = {
  1196. .map.nr = 1,
  1197. .cpus = { -1, },
  1198. };
  1199. static struct {
  1200. struct thread_map map;
  1201. int threads[1];
  1202. } empty_thread_map = {
  1203. .map.nr = 1,
  1204. .threads = { -1, },
  1205. };
  1206. int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
  1207. struct thread_map *threads)
  1208. {
  1209. if (cpus == NULL) {
  1210. /* Work around old compiler warnings about strict aliasing */
  1211. cpus = &empty_cpu_map.map;
  1212. }
  1213. if (threads == NULL)
  1214. threads = &empty_thread_map.map;
  1215. return __perf_evsel__open(evsel, cpus, threads);
  1216. }
  1217. int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
  1218. struct cpu_map *cpus)
  1219. {
  1220. return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
  1221. }
  1222. int perf_evsel__open_per_thread(struct perf_evsel *evsel,
  1223. struct thread_map *threads)
  1224. {
  1225. return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
  1226. }
  1227. static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
  1228. const union perf_event *event,
  1229. struct perf_sample *sample)
  1230. {
  1231. u64 type = evsel->attr.sample_type;
  1232. const u64 *array = event->sample.array;
  1233. bool swapped = evsel->needs_swap;
  1234. union u64_swap u;
  1235. array += ((event->header.size -
  1236. sizeof(event->header)) / sizeof(u64)) - 1;
  1237. if (type & PERF_SAMPLE_IDENTIFIER) {
  1238. sample->id = *array;
  1239. array--;
  1240. }
  1241. if (type & PERF_SAMPLE_CPU) {
  1242. u.val64 = *array;
  1243. if (swapped) {
  1244. /* undo swap of u64, then swap on individual u32s */
  1245. u.val64 = bswap_64(u.val64);
  1246. u.val32[0] = bswap_32(u.val32[0]);
  1247. }
  1248. sample->cpu = u.val32[0];
  1249. array--;
  1250. }
  1251. if (type & PERF_SAMPLE_STREAM_ID) {
  1252. sample->stream_id = *array;
  1253. array--;
  1254. }
  1255. if (type & PERF_SAMPLE_ID) {
  1256. sample->id = *array;
  1257. array--;
  1258. }
  1259. if (type & PERF_SAMPLE_TIME) {
  1260. sample->time = *array;
  1261. array--;
  1262. }
  1263. if (type & PERF_SAMPLE_TID) {
  1264. u.val64 = *array;
  1265. if (swapped) {
  1266. /* undo swap of u64, then swap on individual u32s */
  1267. u.val64 = bswap_64(u.val64);
  1268. u.val32[0] = bswap_32(u.val32[0]);
  1269. u.val32[1] = bswap_32(u.val32[1]);
  1270. }
  1271. sample->pid = u.val32[0];
  1272. sample->tid = u.val32[1];
  1273. array--;
  1274. }
  1275. return 0;
  1276. }
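/*
 * perf_evsel__parse_id_sample() walks backwards because, with
 * sample_id_all, the id block is appended to the tail of non-sample
 * events in the fixed order TID, TIME, ID, STREAM_ID, CPU, IDENTIFIER;
 * reading from the last u64 towards the header undoes that order.
 */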
  1277. static inline bool overflow(const void *endp, u16 max_size, const void *offset,
  1278. u64 size)
  1279. {
  1280. return size > max_size || offset + size > endp;
  1281. }
  1282. #define OVERFLOW_CHECK(offset, size, max_size) \
  1283. do { \
  1284. if (overflow(endp, (max_size), (offset), (size))) \
  1285. return -EFAULT; \
  1286. } while (0)
  1287. #define OVERFLOW_CHECK_u64(offset) \
  1288. OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->weight = 0;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}

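/*
 * Compute how many bytes a synthesized PERF_RECORD_SAMPLE needs for the
 * given sample_type/read_format, so that callers can size the buffer they
 * pass to perf_event__synthesize_sample().
 */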
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}

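/*
 * The inverse of perf_evsel__parse_sample(): write the fields of *sample
 * back into the event buffer in sample_type order.  The caller is expected
 * to have allocated a buffer large enough for the record, typically sized
 * with perf_event__sample_event_size() for the same type/read_format.
 */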
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

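/*
 * Return a pointer into the raw tracepoint payload for the named field.
 * For FIELD_IS_DYNAMIC fields the word stored at field->offset is a
 * descriptor, and only its low 16 bits are used as the real offset into
 * the raw data.
 */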
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

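/*
 * Read the named tracepoint field as an unsigned integer, widening from
 * its on-record size (1, 2, 4 or 8 bytes) and byte-swapping when the
 * recorded data comes from a host of the opposite endianness.
 */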
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}

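/*
 * Helper for perf_evsel__fprintf(): the first item printed is introduced
 * with ':', subsequent items are separated with ','.
 */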
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}

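/*
 * Print a human readable description of the evsel: the group syntax
 * ("{leader,member,...}") when event_group details are requested,
 * otherwise the event name followed by the requested attribute details.
 */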
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		const char *term = "sample_freq";

		if (!evsel->attr.freq)
			term = "sample_period";

		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
					 term, (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

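/*
 * Called when sys_perf_event_open() fails: if the request was for the
 * hardware cycles event and the error suggests a missing PMU, rewrite the
 * evsel to the software cpu-clock event, fill *msg with a notice, and
 * return true so the caller can report the fallback and retry the open.
 */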
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
			  "The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}

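/*
 * Translate an errno value from a failed sys_perf_event_open() into an
 * actionable message for the user, taking the target and the evsel's
 * attributes into account; unrecognized errors fall through to a generic
 * message quoting the errno and event name.
 */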
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}