evsel.c

  1. /*
  2. * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  3. *
  4. * Parts came from builtin-{top,stat,record}.c, see those files for further
  5. * copyright notes.
  6. *
  7. * Released under the GPL v2. (and only v2, not any later version)
  8. */
  9. #include <byteswap.h>
  10. #include <linux/bitops.h>
  11. #include <api/fs/debugfs.h>
  12. #include <traceevent/event-parse.h>
  13. #include <linux/hw_breakpoint.h>
  14. #include <linux/perf_event.h>
  15. #include <sys/resource.h>
  16. #include "asm/bug.h"
  17. #include "callchain.h"
  18. #include "cgroup.h"
  19. #include "evsel.h"
  20. #include "evlist.h"
  21. #include "util.h"
  22. #include "cpumap.h"
  23. #include "thread_map.h"
  24. #include "target.h"
  25. #include "perf_regs.h"
  26. #include "debug.h"
  27. #include "trace-event.h"
  28. #include "stat.h"
  29. static struct {
  30. bool sample_id_all;
  31. bool exclude_guest;
  32. bool mmap2;
  33. bool cloexec;
  34. bool clockid;
  35. bool clockid_wrong;
  36. } perf_missing_features;
  37. static clockid_t clockid;
  38. static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
  39. {
  40. return 0;
  41. }
  42. static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
  43. {
  44. }
  45. static struct {
  46. size_t size;
  47. int (*init)(struct perf_evsel *evsel);
  48. void (*fini)(struct perf_evsel *evsel);
  49. } perf_evsel__object = {
  50. .size = sizeof(struct perf_evsel),
  51. .init = perf_evsel__no_extra_init,
  52. .fini = perf_evsel__no_extra_fini,
  53. };
  54. int perf_evsel__object_config(size_t object_size,
  55. int (*init)(struct perf_evsel *evsel),
  56. void (*fini)(struct perf_evsel *evsel))
  57. {
  58. if (object_size == 0)
  59. goto set_methods;
  60. if (perf_evsel__object.size > object_size)
  61. return -EINVAL;
  62. perf_evsel__object.size = object_size;
  63. set_methods:
  64. if (init != NULL)
  65. perf_evsel__object.init = init;
  66. if (fini != NULL)
  67. perf_evsel__object.fini = fini;
  68. return 0;
  69. }
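/*
 * Usage sketch (the tool-side names below are hypothetical, not from this
 * file): a tool needing extra per-evsel state embeds struct perf_evsel as
 * the first member of its own struct and registers the enlarged size plus
 * init/fini hooks before any evsel is allocated:
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;	(must be the first member)
 *		int		  my_state;
 *	};
 *
 *	perf_evsel__object_config(sizeof(struct my_evsel),
 *				  my_evsel__init, my_evsel__fini);
 */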
  70. #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
  71. int __perf_evsel__sample_size(u64 sample_type)
  72. {
  73. u64 mask = sample_type & PERF_SAMPLE_MASK;
  74. int size = 0;
  75. int i;
  76. for (i = 0; i < 64; i++) {
  77. if (mask & (1ULL << i))
  78. size++;
  79. }
  80. size *= sizeof(u64);
  81. return size;
  82. }
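/*
 * For illustration: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME has three bits set within PERF_SAMPLE_MASK, so the
 * fixed-size part of each sample is 3 * sizeof(u64) = 24 bytes.
 */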
  83. /**
  84. * __perf_evsel__calc_id_pos - calculate id_pos.
  85. * @sample_type: sample type
  86. *
  87. * This function returns the position of the event id (PERF_SAMPLE_ID or
  88. * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
  89. * sample_event.
  90. */
  91. static int __perf_evsel__calc_id_pos(u64 sample_type)
  92. {
  93. int idx = 0;
  94. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  95. return 0;
  96. if (!(sample_type & PERF_SAMPLE_ID))
  97. return -1;
  98. if (sample_type & PERF_SAMPLE_IP)
  99. idx += 1;
  100. if (sample_type & PERF_SAMPLE_TID)
  101. idx += 1;
  102. if (sample_type & PERF_SAMPLE_TIME)
  103. idx += 1;
  104. if (sample_type & PERF_SAMPLE_ADDR)
  105. idx += 1;
  106. return idx;
  107. }
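/*
 * Worked example: with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID the id is the fourth u64 of a sample,
 * giving id_pos = 3; if PERF_SAMPLE_IDENTIFIER is set, id_pos is 0 no
 * matter what else is enabled.
 */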
  108. /**
  109. * __perf_evsel__calc_is_pos - calculate is_pos.
  110. * @sample_type: sample type
  111. *
  112. * This function returns the position (counting backwards) of the event id
  113. * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
  114. * sample_id_all is used there is an id sample appended to non-sample events.
  115. */
  116. static int __perf_evsel__calc_is_pos(u64 sample_type)
  117. {
  118. int idx = 1;
  119. if (sample_type & PERF_SAMPLE_IDENTIFIER)
  120. return 1;
  121. if (!(sample_type & PERF_SAMPLE_ID))
  122. return -1;
  123. if (sample_type & PERF_SAMPLE_CPU)
  124. idx += 1;
  125. if (sample_type & PERF_SAMPLE_STREAM_ID)
  126. idx += 1;
  127. return idx;
  128. }
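/*
 * Worked example: with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID | PERF_SAMPLE_CPU the trailing id sample ends with the
 * cpu word, preceded by the id, so counting back from the end of the
 * event gives is_pos = 2.
 */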
  129. void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
  130. {
  131. evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
  132. evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
  133. }
  134. void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
  135. enum perf_event_sample_format bit)
  136. {
  137. if (!(evsel->attr.sample_type & bit)) {
  138. evsel->attr.sample_type |= bit;
  139. evsel->sample_size += sizeof(u64);
  140. perf_evsel__calc_id_pos(evsel);
  141. }
  142. }
  143. void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
  144. enum perf_event_sample_format bit)
  145. {
  146. if (evsel->attr.sample_type & bit) {
  147. evsel->attr.sample_type &= ~bit;
  148. evsel->sample_size -= sizeof(u64);
  149. perf_evsel__calc_id_pos(evsel);
  150. }
  151. }
  152. void perf_evsel__set_sample_id(struct perf_evsel *evsel,
  153. bool can_sample_identifier)
  154. {
  155. if (can_sample_identifier) {
  156. perf_evsel__reset_sample_bit(evsel, ID);
  157. perf_evsel__set_sample_bit(evsel, IDENTIFIER);
  158. } else {
  159. perf_evsel__set_sample_bit(evsel, ID);
  160. }
  161. evsel->attr.read_format |= PERF_FORMAT_ID;
  162. }
  163. void perf_evsel__init(struct perf_evsel *evsel,
  164. struct perf_event_attr *attr, int idx)
  165. {
  166. evsel->idx = idx;
  167. evsel->tracking = !idx;
  168. evsel->attr = *attr;
  169. evsel->leader = evsel;
  170. evsel->unit = "";
  171. evsel->scale = 1.0;
  172. evsel->evlist = NULL;
  173. INIT_LIST_HEAD(&evsel->node);
  174. INIT_LIST_HEAD(&evsel->config_terms);
  175. perf_evsel__object.init(evsel);
  176. evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
  177. perf_evsel__calc_id_pos(evsel);
  178. evsel->cmdline_group_boundary = false;
  179. }
  180. struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
  181. {
  182. struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
  183. if (evsel != NULL)
  184. perf_evsel__init(evsel, attr, idx);
  185. return evsel;
  186. }
  187. struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
  188. {
  189. struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
  190. if (evsel != NULL) {
  191. struct perf_event_attr attr = {
  192. .type = PERF_TYPE_TRACEPOINT,
  193. .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
  194. PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
  195. };
  196. if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
  197. goto out_free;
  198. evsel->tp_format = trace_event__tp_format(sys, name);
  199. if (evsel->tp_format == NULL)
  200. goto out_free;
  201. event_attr_init(&attr);
  202. attr.config = evsel->tp_format->id;
  203. attr.sample_period = 1;
  204. perf_evsel__init(evsel, &attr, idx);
  205. }
  206. return evsel;
  207. out_free:
  208. zfree(&evsel->name);
  209. free(evsel);
  210. return NULL;
  211. }
  212. const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
  213. "cycles",
  214. "instructions",
  215. "cache-references",
  216. "cache-misses",
  217. "branches",
  218. "branch-misses",
  219. "bus-cycles",
  220. "stalled-cycles-frontend",
  221. "stalled-cycles-backend",
  222. "ref-cycles",
  223. };
  224. static const char *__perf_evsel__hw_name(u64 config)
  225. {
  226. if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
  227. return perf_evsel__hw_names[config];
  228. return "unknown-hardware";
  229. }
  230. static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
  231. {
  232. int colon = 0, r = 0;
  233. struct perf_event_attr *attr = &evsel->attr;
  234. bool exclude_guest_default = false;
  235. #define MOD_PRINT(context, mod) do { \
  236. if (!attr->exclude_##context) { \
  237. if (!colon) colon = ++r; \
  238. r += scnprintf(bf + r, size - r, "%c", mod); \
  239. } } while(0)
  240. if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
  241. MOD_PRINT(kernel, 'k');
  242. MOD_PRINT(user, 'u');
  243. MOD_PRINT(hv, 'h');
  244. exclude_guest_default = true;
  245. }
  246. if (attr->precise_ip) {
  247. if (!colon)
  248. colon = ++r;
  249. r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
  250. exclude_guest_default = true;
  251. }
  252. if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
  253. MOD_PRINT(host, 'H');
  254. MOD_PRINT(guest, 'G');
  255. }
  256. #undef MOD_PRINT
  257. if (colon)
  258. bf[colon - 1] = ':';
  259. return r;
  260. }
  261. static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
  262. {
  263. int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
  264. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  265. }
  266. const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
  267. "cpu-clock",
  268. "task-clock",
  269. "page-faults",
  270. "context-switches",
  271. "cpu-migrations",
  272. "minor-faults",
  273. "major-faults",
  274. "alignment-faults",
  275. "emulation-faults",
  276. "dummy",
  277. };
  278. static const char *__perf_evsel__sw_name(u64 config)
  279. {
  280. if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
  281. return perf_evsel__sw_names[config];
  282. return "unknown-software";
  283. }
  284. static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
  285. {
  286. int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
  287. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  288. }
  289. static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
  290. {
  291. int r;
  292. r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
  293. if (type & HW_BREAKPOINT_R)
  294. r += scnprintf(bf + r, size - r, "r");
  295. if (type & HW_BREAKPOINT_W)
  296. r += scnprintf(bf + r, size - r, "w");
  297. if (type & HW_BREAKPOINT_X)
  298. r += scnprintf(bf + r, size - r, "x");
  299. return r;
  300. }
  301. static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
  302. {
  303. struct perf_event_attr *attr = &evsel->attr;
  304. int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
  305. return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
  306. }
  307. const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
  308. [PERF_EVSEL__MAX_ALIASES] = {
  309. { "L1-dcache", "l1-d", "l1d", "L1-data", },
  310. { "L1-icache", "l1-i", "l1i", "L1-instruction", },
  311. { "LLC", "L2", },
  312. { "dTLB", "d-tlb", "Data-TLB", },
  313. { "iTLB", "i-tlb", "Instruction-TLB", },
  314. { "branch", "branches", "bpu", "btb", "bpc", },
  315. { "node", },
  316. };
  317. const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
  318. [PERF_EVSEL__MAX_ALIASES] = {
  319. { "load", "loads", "read", },
  320. { "store", "stores", "write", },
  321. { "prefetch", "prefetches", "speculative-read", "speculative-load", },
  322. };
  323. const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
  324. [PERF_EVSEL__MAX_ALIASES] = {
  325. { "refs", "Reference", "ops", "access", },
  326. { "misses", "miss", },
  327. };
  328. #define C(x) PERF_COUNT_HW_CACHE_##x
  329. #define CACHE_READ (1 << C(OP_READ))
  330. #define CACHE_WRITE (1 << C(OP_WRITE))
  331. #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
  332. #define COP(x) (1 << x)
  333. /*
  334. * cache operation stat
  335. * L1I : Read and prefetch only
  336. * ITLB and BPU : Read-only
  337. */
  338. static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
  339. [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  340. [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
  341. [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  342. [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  343. [C(ITLB)] = (CACHE_READ),
  344. [C(BPU)] = (CACHE_READ),
  345. [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
  346. };
  347. bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
  348. {
  349. if (perf_evsel__hw_cache_stat[type] & COP(op))
  350. return true; /* valid */
  351. else
  352. return false; /* invalid */
  353. }
  354. int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
  355. char *bf, size_t size)
  356. {
  357. if (result) {
  358. return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
  359. perf_evsel__hw_cache_op[op][0],
  360. perf_evsel__hw_cache_result[result][0]);
  361. }
  362. return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
  363. perf_evsel__hw_cache_op[op][1]);
  364. }
  365. static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
  366. {
  367. u8 op, result, type = (config >> 0) & 0xff;
  368. const char *err = "unknown-ext-hardware-cache-type";
  369. if (type >= PERF_COUNT_HW_CACHE_MAX)
  370. goto out_err;
  371. op = (config >> 8) & 0xff;
  372. err = "unknown-ext-hardware-cache-op";
  373. if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
  374. goto out_err;
  375. result = (config >> 16) & 0xff;
  376. err = "unknown-ext-hardware-cache-result";
  377. if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
  378. goto out_err;
  379. err = "invalid-cache";
  380. if (!perf_evsel__is_cache_op_valid(type, op))
  381. goto out_err;
  382. return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
  383. out_err:
  384. return scnprintf(bf, size, "%s", err);
  385. }
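/*
 * Decoding example: config = 0x10000 splits into type = 0 (L1D), op = 0
 * (READ) and result = 1 (MISS), which the alias tables above render as
 * "L1-dcache-load-misses".
 */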
  386. static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
  387. {
  388. int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
  389. return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
  390. }
  391. static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
  392. {
  393. int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
  394. return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
  395. }
  396. const char *perf_evsel__name(struct perf_evsel *evsel)
  397. {
  398. char bf[128];
  399. if (evsel->name)
  400. return evsel->name;
  401. switch (evsel->attr.type) {
  402. case PERF_TYPE_RAW:
  403. perf_evsel__raw_name(evsel, bf, sizeof(bf));
  404. break;
  405. case PERF_TYPE_HARDWARE:
  406. perf_evsel__hw_name(evsel, bf, sizeof(bf));
  407. break;
  408. case PERF_TYPE_HW_CACHE:
  409. perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
  410. break;
  411. case PERF_TYPE_SOFTWARE:
  412. perf_evsel__sw_name(evsel, bf, sizeof(bf));
  413. break;
  414. case PERF_TYPE_TRACEPOINT:
  415. scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
  416. break;
  417. case PERF_TYPE_BREAKPOINT:
  418. perf_evsel__bp_name(evsel, bf, sizeof(bf));
  419. break;
  420. default:
  421. scnprintf(bf, sizeof(bf), "unknown attr type: %d",
  422. evsel->attr.type);
  423. break;
  424. }
  425. evsel->name = strdup(bf);
  426. return evsel->name ?: "unknown";
  427. }
  428. const char *perf_evsel__group_name(struct perf_evsel *evsel)
  429. {
  430. return evsel->group_name ?: "anon group";
  431. }
  432. int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
  433. {
  434. int ret;
  435. struct perf_evsel *pos;
  436. const char *group_name = perf_evsel__group_name(evsel);
  437. ret = scnprintf(buf, size, "%s", group_name);
  438. ret += scnprintf(buf + ret, size - ret, " { %s",
  439. perf_evsel__name(evsel));
  440. for_each_group_member(pos, evsel)
  441. ret += scnprintf(buf + ret, size - ret, ", %s",
  442. perf_evsel__name(pos));
  443. ret += scnprintf(buf + ret, size - ret, " }");
  444. return ret;
  445. }
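/*
 * Example: for an unnamed group whose leader is "cycles" with a single
 * member "instructions", the resulting description is
 * "anon group { cycles, instructions }".
 */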
  446. static void
  447. perf_evsel__config_callgraph(struct perf_evsel *evsel,
  448. struct record_opts *opts,
  449. struct callchain_param *param)
  450. {
  451. bool function = perf_evsel__is_function_event(evsel);
  452. struct perf_event_attr *attr = &evsel->attr;
  453. perf_evsel__set_sample_bit(evsel, CALLCHAIN);
  454. if (param->record_mode == CALLCHAIN_LBR) {
  455. if (!opts->branch_stack) {
  456. if (attr->exclude_user) {
  457. pr_warning("LBR callstack option is only available "
  458. "to get user callchain information. "
  459. "Falling back to framepointers.\n");
  460. } else {
  461. perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
  462. attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
  463. PERF_SAMPLE_BRANCH_CALL_STACK;
  464. }
  465. } else
  466. pr_warning("Cannot use LBR callstack with branch stack. "
  467. "Falling back to framepointers.\n");
  468. }
  469. if (param->record_mode == CALLCHAIN_DWARF) {
  470. if (!function) {
  471. perf_evsel__set_sample_bit(evsel, REGS_USER);
  472. perf_evsel__set_sample_bit(evsel, STACK_USER);
  473. attr->sample_regs_user = PERF_REGS_MASK;
  474. attr->sample_stack_user = param->dump_size;
  475. attr->exclude_callchain_user = 1;
  476. } else {
  477. pr_info("Cannot use DWARF unwind for function trace event,"
  478. " falling back to framepointers.\n");
  479. }
  480. }
  481. if (function) {
  482. pr_info("Disabling user space callchains for function trace event.\n");
  483. attr->exclude_callchain_user = 1;
  484. }
  485. }
  486. static void
  487. perf_evsel__reset_callgraph(struct perf_evsel *evsel,
  488. struct callchain_param *param)
  489. {
  490. struct perf_event_attr *attr = &evsel->attr;
  491. perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
  492. if (param->record_mode == CALLCHAIN_LBR) {
  493. perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
  494. attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
  495. PERF_SAMPLE_BRANCH_CALL_STACK);
  496. }
  497. if (param->record_mode == CALLCHAIN_DWARF) {
  498. perf_evsel__reset_sample_bit(evsel, REGS_USER);
  499. perf_evsel__reset_sample_bit(evsel, STACK_USER);
  500. }
  501. }
  502. static void apply_config_terms(struct perf_evsel *evsel,
  503. struct record_opts *opts)
  504. {
  505. struct perf_evsel_config_term *term;
  506. struct list_head *config_terms = &evsel->config_terms;
  507. struct perf_event_attr *attr = &evsel->attr;
  508. struct callchain_param param;
  509. u32 dump_size = 0;
  510. char *callgraph_buf = NULL;
  511. /* callgraph default */
  512. param.record_mode = callchain_param.record_mode;
  513. list_for_each_entry(term, config_terms, list) {
  514. switch (term->type) {
  515. case PERF_EVSEL__CONFIG_TERM_PERIOD:
  516. attr->sample_period = term->val.period;
  517. attr->freq = 0;
  518. break;
  519. case PERF_EVSEL__CONFIG_TERM_FREQ:
  520. attr->sample_freq = term->val.freq;
  521. attr->freq = 1;
  522. break;
  523. case PERF_EVSEL__CONFIG_TERM_TIME:
  524. if (term->val.time)
  525. perf_evsel__set_sample_bit(evsel, TIME);
  526. else
  527. perf_evsel__reset_sample_bit(evsel, TIME);
  528. break;
  529. case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
  530. callgraph_buf = term->val.callgraph;
  531. break;
  532. case PERF_EVSEL__CONFIG_TERM_STACK_USER:
  533. dump_size = term->val.stack_user;
  534. break;
  535. default:
  536. break;
  537. }
  538. }
  539. /* User explicitly set per-event callgraph, clear the old setting and reset. */
  540. if ((callgraph_buf != NULL) || (dump_size > 0)) {
  541. /* parse callgraph parameters */
  542. if (callgraph_buf != NULL) {
  543. if (!strcmp(callgraph_buf, "no")) {
  544. param.enabled = false;
  545. param.record_mode = CALLCHAIN_NONE;
  546. } else {
  547. param.enabled = true;
  548. if (parse_callchain_record(callgraph_buf, &param)) {
  549. pr_err("per-event callgraph setting for %s failed. "
  550. "Apply callgraph global setting for it\n",
  551. evsel->name);
  552. return;
  553. }
  554. }
  555. }
  556. if (dump_size > 0) {
  557. dump_size = round_up(dump_size, sizeof(u64));
  558. param.dump_size = dump_size;
  559. }
  560. /* If global callgraph set, clear it */
  561. if (callchain_param.enabled)
  562. perf_evsel__reset_callgraph(evsel, &callchain_param);
  563. /* set perf-event callgraph */
  564. if (param.enabled)
  565. perf_evsel__config_callgraph(evsel, opts, &param);
  566. }
  567. }
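/*
 * These terms come from per-event syntax on the command line; assuming the
 * usual event-term spelling, something like
 *	-e 'cycles/period=100000,call-graph=dwarf,stack-size=8192/'
 * reaches this function as PERIOD, CALLGRAPH and STACK_USER terms and
 * overrides the global settings for that event only.
 */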
  568. /*
  569. * The enable_on_exec/disabled value strategy:
  570. *
  571. * 1) For any type of traced program:
  572. * - all independent events and group leaders are disabled
  573. * - all group members are enabled
  574. *
  575. * Group members are ruled by group leaders. They need to
  576. * be enabled, because the group scheduling relies on that.
  577. *
  578. * 2) For traced programs executed by perf:
  579. * - all independent events and group leaders have
  580. * enable_on_exec set
  581. * - we don't specifically enable or disable any event during
  582. * the record command
  583. *
  584. * Independent events and group leaders are initially disabled
  585. * and get enabled by exec. Group members are ruled by group
  586. * leaders as stated in 1).
  587. *
  588. * 3) For traced programs attached by perf (pid/tid):
  589. * - we specifically enable or disable all events during
  590. * the record command
  591. *
  592. * When attaching events to an already running traced process we
  593. * enable/disable events specifically, as there's no
  594. * initial traced exec call.
  595. */
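/*
 * Concrete illustration of case 2) above: for
 * 'perf record -e "{cycles,instructions}" ./prog' the leader (cycles) gets
 * disabled = 1 and enable_on_exec = 1, while the member (instructions)
 * stays enabled and only starts counting when the exec of ./prog turns the
 * group on.
 */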
  596. void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
  597. {
  598. struct perf_evsel *leader = evsel->leader;
  599. struct perf_event_attr *attr = &evsel->attr;
  600. int track = evsel->tracking;
  601. bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
  602. attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
  603. attr->inherit = !opts->no_inherit;
  604. perf_evsel__set_sample_bit(evsel, IP);
  605. perf_evsel__set_sample_bit(evsel, TID);
  606. if (evsel->sample_read) {
  607. perf_evsel__set_sample_bit(evsel, READ);
  608. /*
  609. * We need ID even in the case of a single event, because
  610. * PERF_SAMPLE_READ processes ID-specific data.
  611. */
  612. perf_evsel__set_sample_id(evsel, false);
  613. /*
  614. * Apply group format only if we belong to a group
  615. * with more than one member.
  616. */
  617. if (leader->nr_members > 1) {
  618. attr->read_format |= PERF_FORMAT_GROUP;
  619. attr->inherit = 0;
  620. }
  621. }
  622. /*
  623. * We give some events a default sampling interval. But keep
  624. * it a weak assumption overridable by the user.
  625. */
  626. if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
  627. opts->user_interval != ULLONG_MAX)) {
  628. if (opts->freq) {
  629. perf_evsel__set_sample_bit(evsel, PERIOD);
  630. attr->freq = 1;
  631. attr->sample_freq = opts->freq;
  632. } else {
  633. attr->sample_period = opts->default_interval;
  634. }
  635. }
  636. /*
  637. * Disable sampling for all group members other
  638. * than leader in case leader 'leads' the sampling.
  639. */
  640. if ((leader != evsel) && leader->sample_read) {
  641. attr->sample_freq = 0;
  642. attr->sample_period = 0;
  643. }
  644. if (opts->no_samples)
  645. attr->sample_freq = 0;
  646. if (opts->inherit_stat)
  647. attr->inherit_stat = 1;
  648. if (opts->sample_address) {
  649. perf_evsel__set_sample_bit(evsel, ADDR);
  650. attr->mmap_data = track;
  651. }
  652. /*
  653. * We don't allow user space callchains for the function trace
  654. * event, due to issues with page faults while tracing the page
  655. * fault handler, and its overall trickiness.
  656. */
  657. if (perf_evsel__is_function_event(evsel))
  658. evsel->attr.exclude_callchain_user = 1;
  659. if (callchain_param.enabled && !evsel->no_aux_samples)
  660. perf_evsel__config_callgraph(evsel, opts, &callchain_param);
  661. if (opts->sample_intr_regs) {
  662. attr->sample_regs_intr = opts->sample_intr_regs;
  663. perf_evsel__set_sample_bit(evsel, REGS_INTR);
  664. }
  665. if (target__has_cpu(&opts->target))
  666. perf_evsel__set_sample_bit(evsel, CPU);
  667. if (opts->period)
  668. perf_evsel__set_sample_bit(evsel, PERIOD);
  669. /*
  670. * When the user explicitly disabled time, don't force it here.
  671. */
  672. if (opts->sample_time &&
  673. (!perf_missing_features.sample_id_all &&
  674. (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
  675. opts->sample_time_set)))
  676. perf_evsel__set_sample_bit(evsel, TIME);
  677. if (opts->raw_samples && !evsel->no_aux_samples) {
  678. perf_evsel__set_sample_bit(evsel, TIME);
  679. perf_evsel__set_sample_bit(evsel, RAW);
  680. perf_evsel__set_sample_bit(evsel, CPU);
  681. }
  682. if (opts->sample_address)
  683. perf_evsel__set_sample_bit(evsel, DATA_SRC);
  684. if (opts->no_buffering) {
  685. attr->watermark = 0;
  686. attr->wakeup_events = 1;
  687. }
  688. if (opts->branch_stack && !evsel->no_aux_samples) {
  689. perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
  690. attr->branch_sample_type = opts->branch_stack;
  691. }
  692. if (opts->sample_weight)
  693. perf_evsel__set_sample_bit(evsel, WEIGHT);
  694. attr->task = track;
  695. attr->mmap = track;
  696. attr->mmap2 = track && !perf_missing_features.mmap2;
  697. attr->comm = track;
  698. if (opts->record_switch_events)
  699. attr->context_switch = track;
  700. if (opts->sample_transaction)
  701. perf_evsel__set_sample_bit(evsel, TRANSACTION);
  702. if (opts->running_time) {
  703. evsel->attr.read_format |=
  704. PERF_FORMAT_TOTAL_TIME_ENABLED |
  705. PERF_FORMAT_TOTAL_TIME_RUNNING;
  706. }
  707. /*
  708. * XXX see the function comment above
  709. *
  710. * Disabling only independent events or group leaders,
  711. * keeping group members enabled.
  712. */
  713. if (perf_evsel__is_group_leader(evsel))
  714. attr->disabled = 1;
  715. /*
  716. * Setting enable_on_exec for independent events and
  717. * group leaders for traced programs executed by perf.
  718. */
  719. if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
  720. !opts->initial_delay)
  721. attr->enable_on_exec = 1;
  722. if (evsel->immediate) {
  723. attr->disabled = 0;
  724. attr->enable_on_exec = 0;
  725. }
  726. clockid = opts->clockid;
  727. if (opts->use_clockid) {
  728. attr->use_clockid = 1;
  729. attr->clockid = opts->clockid;
  730. }
  731. /*
  732. * Apply event specific term settings,
  733. * it overloads any global configuration.
  734. */
  735. apply_config_terms(evsel, opts);
  736. }
  737. static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
  738. {
  739. int cpu, thread;
  740. if (evsel->system_wide)
  741. nthreads = 1;
  742. evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
  743. if (evsel->fd) {
  744. for (cpu = 0; cpu < ncpus; cpu++) {
  745. for (thread = 0; thread < nthreads; thread++) {
  746. FD(evsel, cpu, thread) = -1;
  747. }
  748. }
  749. }
  750. return evsel->fd != NULL ? 0 : -ENOMEM;
  751. }
  752. static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
  753. int ioc, void *arg)
  754. {
  755. int cpu, thread;
  756. if (evsel->system_wide)
  757. nthreads = 1;
  758. for (cpu = 0; cpu < ncpus; cpu++) {
  759. for (thread = 0; thread < nthreads; thread++) {
  760. int fd = FD(evsel, cpu, thread),
  761. err = ioctl(fd, ioc, arg);
  762. if (err)
  763. return err;
  764. }
  765. }
  766. return 0;
  767. }
  768. int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
  769. const char *filter)
  770. {
  771. return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
  772. PERF_EVENT_IOC_SET_FILTER,
  773. (void *)filter);
  774. }
  775. int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
  776. {
  777. char *new_filter = strdup(filter);
  778. if (new_filter != NULL) {
  779. free(evsel->filter);
  780. evsel->filter = new_filter;
  781. return 0;
  782. }
  783. return -1;
  784. }
  785. int perf_evsel__append_filter(struct perf_evsel *evsel,
  786. const char *op, const char *filter)
  787. {
  788. char *new_filter;
  789. if (evsel->filter == NULL)
  790. return perf_evsel__set_filter(evsel, filter);
  791. if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
  792. free(evsel->filter);
  793. evsel->filter = new_filter;
  794. return 0;
  795. }
  796. return -1;
  797. }
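/*
 * For example, if evsel->filter is "prev_pid == 0", then
 * perf_evsel__append_filter(evsel, "&&", "next_pid != 0") replaces it with
 * "(prev_pid == 0) && (next_pid != 0)".
 */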
  798. int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
  799. {
  800. return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
  801. PERF_EVENT_IOC_ENABLE,
  802. 0);
  803. }
  804. int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
  805. {
  806. if (ncpus == 0 || nthreads == 0)
  807. return 0;
  808. if (evsel->system_wide)
  809. nthreads = 1;
  810. evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
  811. if (evsel->sample_id == NULL)
  812. return -ENOMEM;
  813. evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
  814. if (evsel->id == NULL) {
  815. xyarray__delete(evsel->sample_id);
  816. evsel->sample_id = NULL;
  817. return -ENOMEM;
  818. }
  819. return 0;
  820. }
  821. static void perf_evsel__free_fd(struct perf_evsel *evsel)
  822. {
  823. xyarray__delete(evsel->fd);
  824. evsel->fd = NULL;
  825. }
  826. static void perf_evsel__free_id(struct perf_evsel *evsel)
  827. {
  828. xyarray__delete(evsel->sample_id);
  829. evsel->sample_id = NULL;
  830. zfree(&evsel->id);
  831. }
  832. static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
  833. {
  834. struct perf_evsel_config_term *term, *h;
  835. list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
  836. list_del(&term->list);
  837. free(term);
  838. }
  839. }
  840. void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
  841. {
  842. int cpu, thread;
  843. if (evsel->system_wide)
  844. nthreads = 1;
  845. for (cpu = 0; cpu < ncpus; cpu++)
  846. for (thread = 0; thread < nthreads; ++thread) {
  847. close(FD(evsel, cpu, thread));
  848. FD(evsel, cpu, thread) = -1;
  849. }
  850. }
  851. void perf_evsel__exit(struct perf_evsel *evsel)
  852. {
  853. assert(list_empty(&evsel->node));
  854. assert(evsel->evlist == NULL);
  855. perf_evsel__free_fd(evsel);
  856. perf_evsel__free_id(evsel);
  857. perf_evsel__free_config_terms(evsel);
  858. close_cgroup(evsel->cgrp);
  859. cpu_map__put(evsel->cpus);
  860. thread_map__put(evsel->threads);
  861. zfree(&evsel->group_name);
  862. zfree(&evsel->name);
  863. perf_evsel__object.fini(evsel);
  864. }
  865. void perf_evsel__delete(struct perf_evsel *evsel)
  866. {
  867. perf_evsel__exit(evsel);
  868. free(evsel);
  869. }
  870. void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
  871. struct perf_counts_values *count)
  872. {
  873. struct perf_counts_values tmp;
  874. if (!evsel->prev_raw_counts)
  875. return;
  876. if (cpu == -1) {
  877. tmp = evsel->prev_raw_counts->aggr;
  878. evsel->prev_raw_counts->aggr = *count;
  879. } else {
  880. tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
  881. *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
  882. }
  883. count->val = count->val - tmp.val;
  884. count->ena = count->ena - tmp.ena;
  885. count->run = count->run - tmp.run;
  886. }
  887. void perf_counts_values__scale(struct perf_counts_values *count,
  888. bool scale, s8 *pscaled)
  889. {
  890. s8 scaled = 0;
  891. if (scale) {
  892. if (count->run == 0) {
  893. scaled = -1;
  894. count->val = 0;
  895. } else if (count->run < count->ena) {
  896. scaled = 1;
  897. count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
  898. }
  899. } else
  900. count->ena = count->run = 0;
  901. if (pscaled)
  902. *pscaled = scaled;
  903. }
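/*
 * Scaling example: if a multiplexed counter ran for half of the time it
 * was enabled, say val = 1000, ena = 200, run = 100, the scaled value
 * becomes 1000 * 200 / 100 = 2000 and *pscaled is set to 1.
 */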
  904. int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
  905. struct perf_counts_values *count)
  906. {
  907. memset(count, 0, sizeof(*count));
  908. if (FD(evsel, cpu, thread) < 0)
  909. return -EINVAL;
  910. if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
  911. return -errno;
  912. return 0;
  913. }
  914. int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
  915. int cpu, int thread, bool scale)
  916. {
  917. struct perf_counts_values count;
  918. size_t nv = scale ? 3 : 1;
  919. if (FD(evsel, cpu, thread) < 0)
  920. return -EINVAL;
  921. if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
  922. return -ENOMEM;
  923. if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
  924. return -errno;
  925. perf_evsel__compute_deltas(evsel, cpu, thread, &count);
  926. perf_counts_values__scale(&count, scale, NULL);
  927. *perf_counts(evsel->counts, cpu, thread) = count;
  928. return 0;
  929. }
  930. static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
  931. {
  932. struct perf_evsel *leader = evsel->leader;
  933. int fd;
  934. if (perf_evsel__is_group_leader(evsel))
  935. return -1;
  936. /*
  937. * Leader must be already processed/open,
  938. * if not it's a bug.
  939. */
  940. BUG_ON(!leader->fd);
  941. fd = FD(leader, cpu, thread);
  942. BUG_ON(fd == -1);
  943. return fd;
  944. }
  945. struct bit_names {
  946. int bit;
  947. const char *name;
  948. };
  949. static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits)
  950. {
  951. bool first_bit = true;
  952. int i = 0;
  953. do {
  954. if (value & bits[i].bit) {
  955. buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name);
  956. first_bit = false;
  957. }
  958. } while (bits[++i].name != NULL);
  959. }
  960. static void __p_sample_type(char *buf, size_t size, u64 value)
  961. {
  962. #define bit_name(n) { PERF_SAMPLE_##n, #n }
  963. struct bit_names bits[] = {
  964. bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
  965. bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
  966. bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
  967. bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
  968. bit_name(IDENTIFIER), bit_name(REGS_INTR),
  969. { .name = NULL, }
  970. };
  971. #undef bit_name
  972. __p_bits(buf, size, value, bits);
  973. }
  974. static void __p_read_format(char *buf, size_t size, u64 value)
  975. {
  976. #define bit_name(n) { PERF_FORMAT_##n, #n }
  977. struct bit_names bits[] = {
  978. bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
  979. bit_name(ID), bit_name(GROUP),
  980. { .name = NULL, }
  981. };
  982. #undef bit_name
  983. __p_bits(buf, size, value, bits);
  984. }
  985. #define BUF_SIZE 1024
  986. #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
  987. #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
  988. #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
  989. #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
  990. #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
  991. #define PRINT_ATTRn(_n, _f, _p) \
  992. do { \
  993. if (attr->_f) { \
  994. _p(attr->_f); \
  995. ret += attr__fprintf(fp, _n, buf, priv);\
  996. } \
  997. } while (0)
  998. #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p)
  999. int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
  1000. attr__fprintf_f attr__fprintf, void *priv)
  1001. {
  1002. char buf[BUF_SIZE];
  1003. int ret = 0;
  1004. PRINT_ATTRf(type, p_unsigned);
  1005. PRINT_ATTRf(size, p_unsigned);
  1006. PRINT_ATTRf(config, p_hex);
  1007. PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned);
  1008. PRINT_ATTRf(sample_type, p_sample_type);
  1009. PRINT_ATTRf(read_format, p_read_format);
  1010. PRINT_ATTRf(disabled, p_unsigned);
  1011. PRINT_ATTRf(inherit, p_unsigned);
  1012. PRINT_ATTRf(pinned, p_unsigned);
  1013. PRINT_ATTRf(exclusive, p_unsigned);
  1014. PRINT_ATTRf(exclude_user, p_unsigned);
  1015. PRINT_ATTRf(exclude_kernel, p_unsigned);
  1016. PRINT_ATTRf(exclude_hv, p_unsigned);
  1017. PRINT_ATTRf(exclude_idle, p_unsigned);
  1018. PRINT_ATTRf(mmap, p_unsigned);
  1019. PRINT_ATTRf(comm, p_unsigned);
  1020. PRINT_ATTRf(freq, p_unsigned);
  1021. PRINT_ATTRf(inherit_stat, p_unsigned);
  1022. PRINT_ATTRf(enable_on_exec, p_unsigned);
  1023. PRINT_ATTRf(task, p_unsigned);
  1024. PRINT_ATTRf(watermark, p_unsigned);
  1025. PRINT_ATTRf(precise_ip, p_unsigned);
  1026. PRINT_ATTRf(mmap_data, p_unsigned);
  1027. PRINT_ATTRf(sample_id_all, p_unsigned);
  1028. PRINT_ATTRf(exclude_host, p_unsigned);
  1029. PRINT_ATTRf(exclude_guest, p_unsigned);
  1030. PRINT_ATTRf(exclude_callchain_kernel, p_unsigned);
  1031. PRINT_ATTRf(exclude_callchain_user, p_unsigned);
  1032. PRINT_ATTRf(mmap2, p_unsigned);
  1033. PRINT_ATTRf(comm_exec, p_unsigned);
  1034. PRINT_ATTRf(use_clockid, p_unsigned);
  1035. PRINT_ATTRf(context_switch, p_unsigned);
  1036. PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
  1037. PRINT_ATTRf(bp_type, p_unsigned);
  1038. PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex);
  1039. PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex);
  1040. PRINT_ATTRf(sample_regs_user, p_hex);
  1041. PRINT_ATTRf(sample_stack_user, p_unsigned);
  1042. PRINT_ATTRf(clockid, p_signed);
  1043. PRINT_ATTRf(sample_regs_intr, p_hex);
  1044. PRINT_ATTRf(aux_watermark, p_unsigned);
  1045. return ret;
  1046. }
  1047. static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
  1048. void *priv __attribute__((unused)))
  1049. {
  1050. return fprintf(fp, " %-32s %s\n", name, val);
  1051. }
  1052. static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
  1053. struct thread_map *threads)
  1054. {
  1055. int cpu, thread, nthreads;
  1056. unsigned long flags = PERF_FLAG_FD_CLOEXEC;
  1057. int pid = -1, err;
  1058. enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
  1059. if (evsel->system_wide)
  1060. nthreads = 1;
  1061. else
  1062. nthreads = threads->nr;
  1063. if (evsel->fd == NULL &&
  1064. perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
  1065. return -ENOMEM;
  1066. if (evsel->cgrp) {
  1067. flags |= PERF_FLAG_PID_CGROUP;
  1068. pid = evsel->cgrp->fd;
  1069. }
  1070. fallback_missing_features:
  1071. if (perf_missing_features.clockid_wrong)
  1072. evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */
  1073. if (perf_missing_features.clockid) {
  1074. evsel->attr.use_clockid = 0;
  1075. evsel->attr.clockid = 0;
  1076. }
  1077. if (perf_missing_features.cloexec)
  1078. flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
  1079. if (perf_missing_features.mmap2)
  1080. evsel->attr.mmap2 = 0;
  1081. if (perf_missing_features.exclude_guest)
  1082. evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
  1083. retry_sample_id:
  1084. if (perf_missing_features.sample_id_all)
  1085. evsel->attr.sample_id_all = 0;
  1086. if (verbose >= 2) {
  1087. fprintf(stderr, "%.60s\n", graph_dotted_line);
  1088. fprintf(stderr, "perf_event_attr:\n");
  1089. perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
  1090. fprintf(stderr, "%.60s\n", graph_dotted_line);
  1091. }
  1092. for (cpu = 0; cpu < cpus->nr; cpu++) {
  1093. for (thread = 0; thread < nthreads; thread++) {
  1094. int group_fd;
  1095. if (!evsel->cgrp && !evsel->system_wide)
  1096. pid = thread_map__pid(threads, thread);
  1097. group_fd = get_group_fd(evsel, cpu, thread);
  1098. retry_open:
  1099. pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
  1100. pid, cpus->map[cpu], group_fd, flags);
  1101. FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
  1102. pid,
  1103. cpus->map[cpu],
  1104. group_fd, flags);
  1105. if (FD(evsel, cpu, thread) < 0) {
  1106. err = -errno;
  1107. pr_debug2("sys_perf_event_open failed, error %d\n",
  1108. err);
  1109. goto try_fallback;
  1110. }
  1111. set_rlimit = NO_CHANGE;
  1112. /*
  1113. * If we succeeded but had to kill clockid, fail and
  1114. * have perf_evsel__open_strerror() print us a nice
  1115. * error.
  1116. */
  1117. if (perf_missing_features.clockid ||
  1118. perf_missing_features.clockid_wrong) {
  1119. err = -EINVAL;
  1120. goto out_close;
  1121. }
  1122. }
  1123. }
  1124. return 0;
  1125. try_fallback:
  1126. /*
  1127. * perf stat needs between 5 and 22 fds per CPU. When we run out
  1128. * of them, try to increase the limits.
  1129. */
  1130. if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
  1131. struct rlimit l;
  1132. int old_errno = errno;
  1133. if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
  1134. if (set_rlimit == NO_CHANGE)
  1135. l.rlim_cur = l.rlim_max;
  1136. else {
  1137. l.rlim_cur = l.rlim_max + 1000;
  1138. l.rlim_max = l.rlim_cur;
  1139. }
  1140. if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
  1141. set_rlimit++;
  1142. errno = old_errno;
  1143. goto retry_open;
  1144. }
  1145. }
  1146. errno = old_errno;
  1147. }
  1148. if (err != -EINVAL || cpu > 0 || thread > 0)
  1149. goto out_close;
  1150. /*
  1151. * Must probe features in the order they were added to the
  1152. * perf_event_attr interface.
  1153. */
  1154. if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
  1155. perf_missing_features.clockid_wrong = true;
  1156. goto fallback_missing_features;
  1157. } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
  1158. perf_missing_features.clockid = true;
  1159. goto fallback_missing_features;
  1160. } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
  1161. perf_missing_features.cloexec = true;
  1162. goto fallback_missing_features;
  1163. } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
  1164. perf_missing_features.mmap2 = true;
  1165. goto fallback_missing_features;
  1166. } else if (!perf_missing_features.exclude_guest &&
  1167. (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
  1168. perf_missing_features.exclude_guest = true;
  1169. goto fallback_missing_features;
  1170. } else if (!perf_missing_features.sample_id_all) {
  1171. perf_missing_features.sample_id_all = true;
  1172. goto retry_sample_id;
  1173. }
  1174. out_close:
  1175. do {
  1176. while (--thread >= 0) {
  1177. close(FD(evsel, cpu, thread));
  1178. FD(evsel, cpu, thread) = -1;
  1179. }
  1180. thread = nthreads;
  1181. } while (--cpu >= 0);
  1182. return err;
  1183. }
  1184. void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
  1185. {
  1186. if (evsel->fd == NULL)
  1187. return;
  1188. perf_evsel__close_fd(evsel, ncpus, nthreads);
  1189. perf_evsel__free_fd(evsel);
  1190. }
  1191. static struct {
  1192. struct cpu_map map;
  1193. int cpus[1];
  1194. } empty_cpu_map = {
  1195. .map.nr = 1,
  1196. .cpus = { -1, },
  1197. };
  1198. static struct {
  1199. struct thread_map map;
  1200. int threads[1];
  1201. } empty_thread_map = {
  1202. .map.nr = 1,
  1203. .threads = { -1, },
  1204. };
  1205. int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
  1206. struct thread_map *threads)
  1207. {
  1208. if (cpus == NULL) {
  1209. /* Work around old compiler warnings about strict aliasing */
  1210. cpus = &empty_cpu_map.map;
  1211. }
  1212. if (threads == NULL)
  1213. threads = &empty_thread_map.map;
  1214. return __perf_evsel__open(evsel, cpus, threads);
  1215. }
  1216. int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
  1217. struct cpu_map *cpus)
  1218. {
  1219. return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
  1220. }
  1221. int perf_evsel__open_per_thread(struct perf_evsel *evsel,
  1222. struct thread_map *threads)
  1223. {
  1224. return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
  1225. }
  1226. static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
  1227. const union perf_event *event,
  1228. struct perf_sample *sample)
  1229. {
  1230. u64 type = evsel->attr.sample_type;
  1231. const u64 *array = event->sample.array;
  1232. bool swapped = evsel->needs_swap;
  1233. union u64_swap u;
  1234. array += ((event->header.size -
  1235. sizeof(event->header)) / sizeof(u64)) - 1;
  1236. if (type & PERF_SAMPLE_IDENTIFIER) {
  1237. sample->id = *array;
  1238. array--;
  1239. }
  1240. if (type & PERF_SAMPLE_CPU) {
  1241. u.val64 = *array;
  1242. if (swapped) {
  1243. /* undo swap of u64, then swap on individual u32s */
  1244. u.val64 = bswap_64(u.val64);
  1245. u.val32[0] = bswap_32(u.val32[0]);
  1246. }
  1247. sample->cpu = u.val32[0];
  1248. array--;
  1249. }
  1250. if (type & PERF_SAMPLE_STREAM_ID) {
  1251. sample->stream_id = *array;
  1252. array--;
  1253. }
  1254. if (type & PERF_SAMPLE_ID) {
  1255. sample->id = *array;
  1256. array--;
  1257. }
  1258. if (type & PERF_SAMPLE_TIME) {
  1259. sample->time = *array;
  1260. array--;
  1261. }
  1262. if (type & PERF_SAMPLE_TID) {
  1263. u.val64 = *array;
  1264. if (swapped) {
  1265. /* undo swap of u64, then swap on individual u32s */
  1266. u.val64 = bswap_64(u.val64);
  1267. u.val32[0] = bswap_32(u.val32[0]);
  1268. u.val32[1] = bswap_32(u.val32[1]);
  1269. }
  1270. sample->pid = u.val32[0];
  1271. sample->tid = u.val32[1];
  1272. array--;
  1273. }
  1274. return 0;
  1275. }
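/*
 * The id sample appended to non-sample events uses the same field order as
 * samples (TID, TIME, ID, STREAM_ID, CPU, IDENTIFIER), which is why the
 * parser above walks backwards from the end of the event: with
 * sample_type = TID | TIME | ID | CPU the last u64 holds the cpu, the one
 * before it the id, and so on.
 */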
  1276. static inline bool overflow(const void *endp, u16 max_size, const void *offset,
  1277. u64 size)
  1278. {
  1279. return size > max_size || offset + size > endp;
  1280. }
  1281. #define OVERFLOW_CHECK(offset, size, max_size) \
  1282. do { \
  1283. if (overflow(endp, (max_size), (offset), (size))) \
  1284. return -EFAULT; \
  1285. } while (0)
  1286. #define OVERFLOW_CHECK_u64(offset) \
  1287. OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
  1288. int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
  1289. struct perf_sample *data)
  1290. {
  1291. u64 type = evsel->attr.sample_type;
  1292. bool swapped = evsel->needs_swap;
  1293. const u64 *array;
  1294. u16 max_size = event->header.size;
  1295. const void *endp = (void *)event + max_size;
  1296. u64 sz;
  1297. /*
  1298. * used for cross-endian analysis. See git commit 65014ab3
  1299. * for why this goofiness is needed.
  1300. */
  1301. union u64_swap u;
  1302. memset(data, 0, sizeof(*data));
  1303. data->cpu = data->pid = data->tid = -1;
  1304. data->stream_id = data->id = data->time = -1ULL;
  1305. data->period = evsel->attr.sample_period;
  1306. data->weight = 0;
  1307. if (event->header.type != PERF_RECORD_SAMPLE) {
  1308. if (!evsel->attr.sample_id_all)
  1309. return 0;
  1310. return perf_evsel__parse_id_sample(evsel, event, data);
  1311. }
  1312. array = event->sample.array;
  1313. /*
  1314. * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
  1315. * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
  1316. * check the format does not go past the end of the event.
  1317. */
  1318. if (evsel->sample_size + sizeof(event->header) > event->header.size)
  1319. return -EFAULT;
  1320. data->id = -1ULL;
  1321. if (type & PERF_SAMPLE_IDENTIFIER) {
  1322. data->id = *array;
  1323. array++;
  1324. }
  1325. if (type & PERF_SAMPLE_IP) {
  1326. data->ip = *array;
  1327. array++;
  1328. }
  1329. if (type & PERF_SAMPLE_TID) {
  1330. u.val64 = *array;
  1331. if (swapped) {
  1332. /* undo swap of u64, then swap on individual u32s */
  1333. u.val64 = bswap_64(u.val64);
  1334. u.val32[0] = bswap_32(u.val32[0]);
  1335. u.val32[1] = bswap_32(u.val32[1]);
  1336. }
  1337. data->pid = u.val32[0];
  1338. data->tid = u.val32[1];
  1339. array++;
  1340. }
  1341. if (type & PERF_SAMPLE_TIME) {
  1342. data->time = *array;
  1343. array++;
  1344. }
  1345. data->addr = 0;
  1346. if (type & PERF_SAMPLE_ADDR) {
  1347. data->addr = *array;
  1348. array++;
  1349. }
  1350. if (type & PERF_SAMPLE_ID) {
  1351. data->id = *array;
  1352. array++;
  1353. }
  1354. if (type & PERF_SAMPLE_STREAM_ID) {
  1355. data->stream_id = *array;
  1356. array++;
  1357. }
  1358. if (type & PERF_SAMPLE_CPU) {
  1359. u.val64 = *array;
  1360. if (swapped) {
  1361. /* undo swap of u64, then swap on individual u32s */
  1362. u.val64 = bswap_64(u.val64);
  1363. u.val32[0] = bswap_32(u.val32[0]);
  1364. }
  1365. data->cpu = u.val32[0];
  1366. array++;
  1367. }
  1368. if (type & PERF_SAMPLE_PERIOD) {
  1369. data->period = *array;
  1370. array++;
  1371. }
	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}
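
	/*
	 * The user stack dump is laid out as: requested size, the dumped
	 * bytes, then the size actually dumped (which may be smaller than
	 * requested and is what ends up in user_stack.size).
	 */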
	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
	if (type & PERF_SAMPLE_REGS_INTR) {
		OVERFLOW_CHECK_u64(array);
		data->intr_regs.abi = *array;
		array++;

		if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
			u64 mask = evsel->attr.sample_regs_intr;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->intr_regs.mask = mask;
			data->intr_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	return 0;
}
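
/*
 * Compute how many bytes perf_event__synthesize_sample() will need for
 * this sample, given the same sample_type and read_format.
 */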
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	return result;
}
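
/*
 * Inverse of perf_evsel__parse_sample(): write the fields of 'sample'
 * back into the event's array in the order dictated by 'type'.  Callers
 * typically size the event with perf_event__sample_event_size() first.
 */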
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	return 0;
}
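
/* Look up a tracepoint field by name in the evsel's parsed event format. */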
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
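
/*
 * Return a pointer to the named field inside the sample's raw data.
 * For dynamic (variable-length) fields the raw data holds a 32-bit
 * descriptor at field->offset; its low 16 bits are the real offset.
 */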
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}
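
/*
 * Read the named field as an unsigned integer, byte-swapping it when
 * the sample was recorded on a host with different endianness.
 */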
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		memcpy(&value, ptr, sizeof(u64));
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
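
/*
 * Helpers for perf_evsel__fprintf(): print ':' before the first term
 * and ',' before each subsequent one.
 */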
static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv)
{
	return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val);
}
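
/*
 * Print a one-line description of the evsel: the (possibly grouped)
 * event name, followed by attribute details or sampling frequency
 * depending on 'details'.  Returns the number of characters printed.
 */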
int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose) {
		printed += perf_event_attr__fprintf(fp, &evsel->attr,
						    __print_attr__fprintf, &first);
	} else if (details->freq) {
		const char *term = "sample_freq";

		if (!evsel->attr.freq)
			term = "sample_period";

		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
					 term, (u64)evsel->attr.sample_freq);
	}
out:
	fputc('\n', fp);
	return ++printed;
}
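
/*
 * Try a software fallback when opening the event failed.  Currently
 * only handles hardware cpu-cycles -> cpu-clock; returns true if the
 * evsel's attr was rewritten and the open should be retried.
 */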
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type   == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}
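
/*
 * Turn a sys_perf_event_open() errno into a user-readable hint in 'msg',
 * returning the number of characters written (scnprintf semantics).
 */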
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Probably the maximum number of open file descriptors has been reached.\n"
			 "Hint: Try again after reducing the number of events.\n"
			 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	case EBUSY:
		if (find_process("oprofiled"))
			return scnprintf(msg, size,
	"The PMU counters are busy/taken by another profiler.\n"
	"We found oprofile daemon running, please stop it and try again.");
		break;
	case EINVAL:
		if (perf_missing_features.clockid)
			return scnprintf(msg, size, "clockid feature not supported.");
		if (perf_missing_features.clockid_wrong)
			return scnprintf(msg, size, "wrong clockid (%d).", clockid);
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror_r(err, sbuf, sizeof(sbuf)),
			 perf_evsel__name(evsel));
}