  1. /*
  2. * builtin-trace.c
  3. *
  4. * Builtin 'trace' command:
  5. *
  6. * Display a continuously updated trace of any workload, CPU, specific PID,
  7. * system wide, etc. The default format is loosely strace-like, but any other
  8. * event may be specified using --event.
  9. *
  10. * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  11. *
  12. * Initially based on the 'trace' prototype by Thomas Gleixner:
  13. *
  14. * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
  15. *
  16. * Released under the GPL v2. (and only v2, not any later version)
  17. */
  18. #include <traceevent/event-parse.h>
  19. #include <api/fs/tracing_path.h>
  20. #include "builtin.h"
  21. #include "util/color.h"
  22. #include "util/debug.h"
  23. #include "util/evlist.h"
  24. #include <subcmd/exec-cmd.h>
  25. #include "util/machine.h"
  26. #include "util/session.h"
  27. #include "util/thread.h"
  28. #include <subcmd/parse-options.h>
  29. #include "util/strlist.h"
  30. #include "util/intlist.h"
  31. #include "util/thread_map.h"
  32. #include "util/stat.h"
  33. #include "trace-event.h"
  34. #include "util/parse-events.h"
  35. #include "util/bpf-loader.h"
  36. #include "callchain.h"
  37. #include "syscalltbl.h"
  38. #include "rb_resort.h"
  39. #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
  40. #include <stdlib.h>
  41. #include <linux/futex.h>
  42. #include <linux/err.h>
  43. #include <linux/seccomp.h>
  44. #include <linux/filter.h>
  45. #include <linux/audit.h>
  46. #include <sys/ptrace.h>
  47. #include <linux/random.h>
  48. #include <linux/stringify.h>
  49. #ifndef O_CLOEXEC
  50. # define O_CLOEXEC 02000000
  51. #endif
  52. struct trace {
  53. struct perf_tool tool;
  54. struct syscalltbl *sctbl;
  55. struct {
  56. int max;
  57. struct syscall *table;
  58. struct {
  59. struct perf_evsel *sys_enter,
  60. *sys_exit;
  61. } events;
  62. } syscalls;
  63. struct record_opts opts;
  64. struct perf_evlist *evlist;
  65. struct machine *host;
  66. struct thread *current;
  67. u64 base_time;
  68. FILE *output;
  69. unsigned long nr_events;
  70. struct strlist *ev_qualifier;
  71. struct {
  72. size_t nr;
  73. int *entries;
  74. } ev_qualifier_ids;
  75. struct intlist *tid_list;
  76. struct intlist *pid_list;
  77. struct {
  78. size_t nr;
  79. pid_t *entries;
  80. } filter_pids;
  81. double duration_filter;
  82. double runtime_ms;
  83. struct {
  84. u64 vfs_getname,
  85. proc_getname;
  86. } stats;
  87. unsigned int max_stack;
  88. unsigned int min_stack;
  89. bool not_ev_qualifier;
  90. bool live;
  91. bool full_time;
  92. bool sched;
  93. bool multiple_threads;
  94. bool summary;
  95. bool summary_only;
  96. bool show_comm;
  97. bool show_tool_stats;
  98. bool trace_syscalls;
  99. bool kernel_syscallchains;
  100. bool force;
  101. bool vfs_getname;
  102. int trace_pgfaults;
  103. int open_id;
  104. };
  105. struct tp_field {
  106. int offset;
  107. union {
  108. u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
  109. void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
  110. };
  111. };
  112. #define TP_UINT_FIELD(bits) \
  113. static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
  114. { \
  115. u##bits value; \
  116. memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
  117. return value; \
  118. }
  119. TP_UINT_FIELD(8);
  120. TP_UINT_FIELD(16);
  121. TP_UINT_FIELD(32);
  122. TP_UINT_FIELD(64);
  123. #define TP_UINT_FIELD__SWAPPED(bits) \
  124. static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
  125. { \
  126. u##bits value; \
  127. memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
  128. return bswap_##bits(value);\
  129. }
  130. TP_UINT_FIELD__SWAPPED(16);
  131. TP_UINT_FIELD__SWAPPED(32);
  132. TP_UINT_FIELD__SWAPPED(64);
  133. static int tp_field__init_uint(struct tp_field *field,
  134. struct format_field *format_field,
  135. bool needs_swap)
  136. {
  137. field->offset = format_field->offset;
  138. switch (format_field->size) {
  139. case 1:
  140. field->integer = tp_field__u8;
  141. break;
  142. case 2:
  143. field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
  144. break;
  145. case 4:
  146. field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
  147. break;
  148. case 8:
  149. field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
  150. break;
  151. default:
  152. return -1;
  153. }
  154. return 0;
  155. }
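/*
 * Note: tp_field__init_uint() binds a fixed-width reader to a tracepoint
 * field based on the size advertised in its format description, picking the
 * byte-swapping variant when the recorded data has foreign endianness.
 * A minimal usage sketch (hypothetical field name, error handling elided):
 *
 *	struct tp_field id;
 *	struct format_field *fmt = perf_evsel__field(evsel, "id");
 *	if (fmt && !tp_field__init_uint(&id, fmt, evsel->needs_swap))
 *		syscall_id = id.integer(&id, sample);
 */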
  156. static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
  157. {
  158. return sample->raw_data + field->offset;
  159. }
  160. static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
  161. {
  162. field->offset = format_field->offset;
  163. field->pointer = tp_field__ptr;
  164. return 0;
  165. }
  166. struct syscall_tp {
  167. struct tp_field id;
  168. union {
  169. struct tp_field args, ret;
  170. };
  171. };
  172. static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
  173. struct tp_field *field,
  174. const char *name)
  175. {
  176. struct format_field *format_field = perf_evsel__field(evsel, name);
  177. if (format_field == NULL)
  178. return -1;
  179. return tp_field__init_uint(field, format_field, evsel->needs_swap);
  180. }
  181. #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
  182. ({ struct syscall_tp *sc = evsel->priv;\
  183. perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
  184. static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
  185. struct tp_field *field,
  186. const char *name)
  187. {
  188. struct format_field *format_field = perf_evsel__field(evsel, name);
  189. if (format_field == NULL)
  190. return -1;
  191. return tp_field__init_ptr(field, format_field);
  192. }
  193. #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
  194. ({ struct syscall_tp *sc = evsel->priv;\
  195. perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
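/*
 * The *_sc_tp_* wrappers above rely on the preprocessor stringifying the
 * field name: perf_evsel__init_sc_tp_uint_field(evsel, id) looks up the "id"
 * field in the evsel's tracepoint format and initializes the matching member
 * of the struct syscall_tp hanging off evsel->priv.
 */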
  196. static void perf_evsel__delete_priv(struct perf_evsel *evsel)
  197. {
  198. zfree(&evsel->priv);
  199. perf_evsel__delete(evsel);
  200. }
  201. static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
  202. {
  203. evsel->priv = malloc(sizeof(struct syscall_tp));
  204. if (evsel->priv != NULL) {
  205. if (perf_evsel__init_sc_tp_uint_field(evsel, id))
  206. goto out_delete;
  207. evsel->handler = handler;
  208. return 0;
  209. }
  210. return -ENOMEM;
  211. out_delete:
  212. zfree(&evsel->priv);
  213. return -ENOENT;
  214. }
  215. static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
  216. {
  217. struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
  218. /* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
  219. if (IS_ERR(evsel))
  220. evsel = perf_evsel__newtp("syscalls", direction);
  221. if (IS_ERR(evsel))
  222. return NULL;
  223. if (perf_evsel__init_syscall_tp(evsel, handler))
  224. goto out_delete;
  225. return evsel;
  226. out_delete:
  227. perf_evsel__delete_priv(evsel);
  228. return NULL;
  229. }
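/*
 * For instance, perf_evsel__syscall_newtp("sys_enter", handler) first tries
 * the raw_syscalls:sys_enter tracepoint and, failing that, the same name in
 * the "syscalls" group, so both newer and older kernels are covered.
 */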
  230. #define perf_evsel__sc_tp_uint(evsel, name, sample) \
  231. ({ struct syscall_tp *fields = evsel->priv; \
  232. fields->name.integer(&fields->name, sample); })
  233. #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
  234. ({ struct syscall_tp *fields = evsel->priv; \
  235. fields->name.pointer(&fields->name, sample); })
  236. struct syscall_arg {
  237. unsigned long val;
  238. struct thread *thread;
  239. struct trace *trace;
  240. void *parm;
  241. u8 idx;
  242. u8 mask;
  243. };
  244. struct strarray {
  245. int offset;
  246. int nr_entries;
  247. const char **entries;
  248. };
  249. #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
  250. .nr_entries = ARRAY_SIZE(array), \
  251. .entries = array, \
  252. }
  253. #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
  254. .offset = off, \
  255. .nr_entries = ARRAY_SIZE(array), \
  256. .entries = array, \
  257. }
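/*
 * A strarray maps small integer argument values to symbolic names, with an
 * optional offset for value ranges that do not start at zero.  For example,
 * the epoll_ctl_ops array below is registered with offset 1 so that
 * EPOLL_CTL_ADD (1) prints as "ADD"; values outside the table fall back to
 * the integer format handed to __syscall_arg__scnprintf_strarray().
 */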
  258. static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
  259. const char *intfmt,
  260. struct syscall_arg *arg)
  261. {
  262. struct strarray *sa = arg->parm;
  263. int idx = arg->val - sa->offset;
  264. if (idx < 0 || idx >= sa->nr_entries)
  265. return scnprintf(bf, size, intfmt, arg->val);
  266. return scnprintf(bf, size, "%s", sa->entries[idx]);
  267. }
  268. static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
  269. struct syscall_arg *arg)
  270. {
  271. return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
  272. }
  273. #define SCA_STRARRAY syscall_arg__scnprintf_strarray
  274. #if defined(__i386__) || defined(__x86_64__)
  275. /*
  276. * FIXME: Make this available to all arches as soon as the ioctl beautifier
  277. * gets rewritten to support all arches.
  278. */
  279. static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
  280. struct syscall_arg *arg)
  281. {
  282. return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
  283. }
  284. #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
  285. #endif /* defined(__i386__) || defined(__x86_64__) */
  286. static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
  287. struct syscall_arg *arg);
  288. #define SCA_FD syscall_arg__scnprintf_fd
  289. static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
  290. struct syscall_arg *arg)
  291. {
  292. int fd = arg->val;
  293. if (fd == AT_FDCWD)
  294. return scnprintf(bf, size, "CWD");
  295. return syscall_arg__scnprintf_fd(bf, size, arg);
  296. }
  297. #define SCA_FDAT syscall_arg__scnprintf_fd_at
  298. static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
  299. struct syscall_arg *arg);
  300. #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
  301. static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
  302. struct syscall_arg *arg)
  303. {
  304. return scnprintf(bf, size, "%#lx", arg->val);
  305. }
  306. #define SCA_HEX syscall_arg__scnprintf_hex
  307. static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
  308. struct syscall_arg *arg)
  309. {
  310. return scnprintf(bf, size, "%d", (int)arg->val);
  311. }
  312. #define SCA_INT syscall_arg__scnprintf_int
  313. static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
  314. struct syscall_arg *arg)
  315. {
  316. int printed = 0, op = arg->val;
  317. if (op == 0)
  318. return scnprintf(bf, size, "NONE");
  319. #define P_CMD(cmd) \
  320. if ((op & LOCK_##cmd) == LOCK_##cmd) { \
  321. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
  322. op &= ~LOCK_##cmd; \
  323. }
  324. P_CMD(SH);
  325. P_CMD(EX);
  326. P_CMD(NB);
  327. P_CMD(UN);
  328. P_CMD(MAND);
  329. P_CMD(RW);
  330. P_CMD(READ);
  331. P_CMD(WRITE);
  332. #undef P_CMD
  333. if (op)
  334. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
  335. return printed;
  336. }
  337. #define SCA_FLOCK syscall_arg__scnprintf_flock
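/*
 * Example (illustrative): a flock(fd, LOCK_SH | LOCK_NB) call has its second
 * argument rendered as "SH|NB"; any bits not covered by the P_CMD() table are
 * appended in hex.
 */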
  338. static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
  339. {
  340. enum syscall_futex_args {
  341. SCF_UADDR = (1 << 0),
  342. SCF_OP = (1 << 1),
  343. SCF_VAL = (1 << 2),
  344. SCF_TIMEOUT = (1 << 3),
  345. SCF_UADDR2 = (1 << 4),
  346. SCF_VAL3 = (1 << 5),
  347. };
  348. int op = arg->val;
  349. int cmd = op & FUTEX_CMD_MASK;
  350. size_t printed = 0;
  351. switch (cmd) {
  352. #define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
  353. P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
  354. P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  355. P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  356. P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
  357. P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
  358. P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
  359. P_FUTEX_OP(WAKE_OP); break;
  360. P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  361. P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  362. P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
  363. P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
  364. P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
  365. P_FUTEX_OP(WAIT_REQUEUE_PI); break;
  366. default: printed = scnprintf(bf, size, "%#x", cmd); break;
  367. }
  368. if (op & FUTEX_PRIVATE_FLAG)
  369. printed += scnprintf(bf + printed, size - printed, "|PRIV");
  370. if (op & FUTEX_CLOCK_REALTIME)
  371. printed += scnprintf(bf + printed, size - printed, "|CLKRT");
  372. return printed;
  373. }
  374. #define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
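/*
 * Besides decoding the command, the futex op beautifier records in arg->mask
 * which of the remaining futex() arguments carry no meaning for that command,
 * so the code that formats the full argument list can skip them.  Example
 * (illustrative): FUTEX_WAKE masks out val3, uaddr2 and the timeout, leaving
 * only the futex address, the op and the wake count in the output.
 */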
  375. static const char *bpf_cmd[] = {
  376. "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
  377. "MAP_GET_NEXT_KEY", "PROG_LOAD",
  378. };
  379. static DEFINE_STRARRAY(bpf_cmd);
  380. static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
  381. static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
  382. static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
  383. static DEFINE_STRARRAY(itimers);
  384. static const char *keyctl_options[] = {
  385. "GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
  386. "SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
  387. "INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
  388. "ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
  389. "INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
  390. };
  391. static DEFINE_STRARRAY(keyctl_options);
  392. static const char *whences[] = { "SET", "CUR", "END",
  393. #ifdef SEEK_DATA
  394. "DATA",
  395. #endif
  396. #ifdef SEEK_HOLE
  397. "HOLE",
  398. #endif
  399. };
  400. static DEFINE_STRARRAY(whences);
  401. static const char *fcntl_cmds[] = {
  402. "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
  403. "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
  404. "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
  405. "F_GETOWNER_UIDS",
  406. };
  407. static DEFINE_STRARRAY(fcntl_cmds);
  408. static const char *rlimit_resources[] = {
  409. "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
  410. "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
  411. "RTTIME",
  412. };
  413. static DEFINE_STRARRAY(rlimit_resources);
  414. static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
  415. static DEFINE_STRARRAY(sighow);
  416. static const char *clockid[] = {
  417. "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
  418. "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
  419. "REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
  420. };
  421. static DEFINE_STRARRAY(clockid);
  422. static const char *socket_families[] = {
  423. "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
  424. "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
  425. "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
  426. "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
  427. "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
  428. "ALG", "NFC", "VSOCK",
  429. };
  430. static DEFINE_STRARRAY(socket_families);
  431. static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
  432. struct syscall_arg *arg)
  433. {
  434. size_t printed = 0;
  435. int mode = arg->val;
  436. if (mode == F_OK) /* 0 */
  437. return scnprintf(bf, size, "F");
  438. #define P_MODE(n) \
  439. if (mode & n##_OK) { \
  440. printed += scnprintf(bf + printed, size - printed, "%s", #n); \
  441. mode &= ~n##_OK; \
  442. }
  443. P_MODE(R);
  444. P_MODE(W);
  445. P_MODE(X);
  446. #undef P_MODE
  447. if (mode)
  448. printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
  449. return printed;
  450. }
  451. #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
  452. static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
  453. struct syscall_arg *arg);
  454. #define SCA_FILENAME syscall_arg__scnprintf_filename
  455. static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
  456. struct syscall_arg *arg)
  457. {
  458. int printed = 0, flags = arg->val;
  459. if (!(flags & O_CREAT))
  460. arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
  461. if (flags == 0)
  462. return scnprintf(bf, size, "RDONLY");
  463. #define P_FLAG(n) \
  464. if (flags & O_##n) { \
  465. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  466. flags &= ~O_##n; \
  467. }
  468. P_FLAG(APPEND);
  469. P_FLAG(ASYNC);
  470. P_FLAG(CLOEXEC);
  471. P_FLAG(CREAT);
  472. P_FLAG(DIRECT);
  473. P_FLAG(DIRECTORY);
  474. P_FLAG(EXCL);
  475. P_FLAG(LARGEFILE);
  476. P_FLAG(NOATIME);
  477. P_FLAG(NOCTTY);
  478. #ifdef O_NONBLOCK
  479. P_FLAG(NONBLOCK);
  480. #elif defined(O_NDELAY)
  481. P_FLAG(NDELAY);
  482. #endif
  483. #ifdef O_PATH
  484. P_FLAG(PATH);
  485. #endif
  486. P_FLAG(RDWR);
  487. #ifdef O_DSYNC
  488. if ((flags & O_SYNC) == O_SYNC)
  489. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
  490. else {
  491. P_FLAG(DSYNC);
  492. }
  493. #else
  494. P_FLAG(SYNC);
  495. #endif
  496. P_FLAG(TRUNC);
  497. P_FLAG(WRONLY);
  498. #undef P_FLAG
  499. if (flags)
  500. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  501. return printed;
  502. }
  503. #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
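/*
 * Example (illustrative): open("foo", O_WRONLY | O_CREAT | O_TRUNC, 0644)
 * has its flags rendered as "CREAT|TRUNC|WRONLY"; since O_CREAT is present
 * the mode argument is left unmasked and printed too, whereas a plain
 * O_RDONLY open prints just "RDONLY" and hides the meaningless mode.
 */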
  504. static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
  505. struct syscall_arg *arg)
  506. {
  507. int printed = 0, flags = arg->val;
  508. #define P_FLAG(n) \
  509. if (flags & O_##n) { \
  510. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  511. flags &= ~O_##n; \
  512. }
  513. P_FLAG(CLOEXEC);
  514. P_FLAG(NONBLOCK);
  515. #undef P_FLAG
  516. if (flags)
  517. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  518. return printed;
  519. }
  520. #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
  521. static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
  522. {
  523. int sig = arg->val;
  524. switch (sig) {
  525. #define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
  526. P_SIGNUM(HUP);
  527. P_SIGNUM(INT);
  528. P_SIGNUM(QUIT);
  529. P_SIGNUM(ILL);
  530. P_SIGNUM(TRAP);
  531. P_SIGNUM(ABRT);
  532. P_SIGNUM(BUS);
  533. P_SIGNUM(FPE);
  534. P_SIGNUM(KILL);
  535. P_SIGNUM(USR1);
  536. P_SIGNUM(SEGV);
  537. P_SIGNUM(USR2);
  538. P_SIGNUM(PIPE);
  539. P_SIGNUM(ALRM);
  540. P_SIGNUM(TERM);
  541. P_SIGNUM(CHLD);
  542. P_SIGNUM(CONT);
  543. P_SIGNUM(STOP);
  544. P_SIGNUM(TSTP);
  545. P_SIGNUM(TTIN);
  546. P_SIGNUM(TTOU);
  547. P_SIGNUM(URG);
  548. P_SIGNUM(XCPU);
  549. P_SIGNUM(XFSZ);
  550. P_SIGNUM(VTALRM);
  551. P_SIGNUM(PROF);
  552. P_SIGNUM(WINCH);
  553. P_SIGNUM(IO);
  554. P_SIGNUM(PWR);
  555. P_SIGNUM(SYS);
  556. #ifdef SIGEMT
  557. P_SIGNUM(EMT);
  558. #endif
  559. #ifdef SIGSTKFLT
  560. P_SIGNUM(STKFLT);
  561. #endif
  562. #ifdef SIGSWI
  563. P_SIGNUM(SWI);
  564. #endif
  565. default: break;
  566. }
  567. return scnprintf(bf, size, "%#x", sig);
  568. }
  569. #define SCA_SIGNUM syscall_arg__scnprintf_signum
  570. #if defined(__i386__) || defined(__x86_64__)
  571. /*
  572. * FIXME: Make this available to all arches.
  573. */
  574. #define TCGETS 0x5401
  575. static const char *tioctls[] = {
  576. "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
  577. "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
  578. "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
  579. "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
  580. "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
  581. "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
  582. "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
  583. "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
  584. "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
  585. "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
  586. "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
  587. [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
  588. "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
  589. "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
  590. "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
  591. };
  592. static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
  593. #endif /* defined(__i386__) || defined(__x86_64__) */
  594. #ifndef SECCOMP_SET_MODE_STRICT
  595. #define SECCOMP_SET_MODE_STRICT 0
  596. #endif
  597. #ifndef SECCOMP_SET_MODE_FILTER
  598. #define SECCOMP_SET_MODE_FILTER 1
  599. #endif
  600. static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
  601. {
  602. int op = arg->val;
  603. size_t printed = 0;
  604. switch (op) {
  605. #define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, #n); break
  606. P_SECCOMP_SET_MODE_OP(STRICT);
  607. P_SECCOMP_SET_MODE_OP(FILTER);
  608. #undef P_SECCOMP_SET_MODE_OP
  609. default: printed = scnprintf(bf, size, "%#x", op); break;
  610. }
  611. return printed;
  612. }
  613. #define SCA_SECCOMP_OP syscall_arg__scnprintf_seccomp_op
  614. #ifndef SECCOMP_FILTER_FLAG_TSYNC
  615. #define SECCOMP_FILTER_FLAG_TSYNC 1
  616. #endif
  617. static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size,
  618. struct syscall_arg *arg)
  619. {
  620. int printed = 0, flags = arg->val;
  621. #define P_FLAG(n) \
  622. if (flags & SECCOMP_FILTER_FLAG_##n) { \
  623. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  624. flags &= ~SECCOMP_FILTER_FLAG_##n; \
  625. }
  626. P_FLAG(TSYNC);
  627. #undef P_FLAG
  628. if (flags)
  629. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  630. return printed;
  631. }
  632. #define SCA_SECCOMP_FLAGS syscall_arg__scnprintf_seccomp_flags
  633. #ifndef GRND_NONBLOCK
  634. #define GRND_NONBLOCK 0x0001
  635. #endif
  636. #ifndef GRND_RANDOM
  637. #define GRND_RANDOM 0x0002
  638. #endif
  639. static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
  640. struct syscall_arg *arg)
  641. {
  642. int printed = 0, flags = arg->val;
  643. #define P_FLAG(n) \
  644. if (flags & GRND_##n) { \
  645. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  646. flags &= ~GRND_##n; \
  647. }
  648. P_FLAG(RANDOM);
  649. P_FLAG(NONBLOCK);
  650. #undef P_FLAG
  651. if (flags)
  652. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  653. return printed;
  654. }
  655. #define SCA_GETRANDOM_FLAGS syscall_arg__scnprintf_getrandom_flags
  656. #define STRARRAY(arg, name, array) \
  657. .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
  658. .arg_parm = { [arg] = &strarray__##array, }
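/*
 * STRARRAY() is a convenience initializer for the syscall_fmt table below: it
 * wires argument 'arg' to the generic strarray beautifier and points its parm
 * at the named table.  Illustrative expansion:
 *
 *	{ .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), }
 *
 * sets .arg_scnprintf[0] = SCA_STRARRAY and .arg_parm[0] = &strarray__bpf_cmd.
 */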
  659. #include "trace/beauty/eventfd.c"
  660. #include "trace/beauty/pid.c"
  661. #include "trace/beauty/mmap.c"
  662. #include "trace/beauty/mode_t.c"
  663. #include "trace/beauty/msg_flags.c"
  664. #include "trace/beauty/perf_event_open.c"
  665. #include "trace/beauty/sched_policy.c"
  666. #include "trace/beauty/socket_type.c"
  667. #include "trace/beauty/waitid_options.c"
  668. static struct syscall_fmt {
  669. const char *name;
  670. const char *alias;
  671. size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
  672. void *arg_parm[6];
  673. bool errmsg;
  674. bool errpid;
  675. bool timeout;
  676. bool hexret;
  677. } syscall_fmts[] = {
  678. { .name = "access", .errmsg = true,
  679. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
  680. [1] = SCA_ACCMODE, /* mode */ }, },
  681. { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
  682. { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
  683. { .name = "brk", .hexret = true,
  684. .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
  685. { .name = "chdir", .errmsg = true,
  686. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  687. { .name = "chmod", .errmsg = true,
  688. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  689. { .name = "chroot", .errmsg = true,
  690. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  691. { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
  692. { .name = "clone", .errpid = true, },
  693. { .name = "close", .errmsg = true,
  694. .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
  695. { .name = "connect", .errmsg = true, },
  696. { .name = "creat", .errmsg = true,
  697. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  698. { .name = "dup", .errmsg = true,
  699. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  700. { .name = "dup2", .errmsg = true,
  701. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  702. { .name = "dup3", .errmsg = true,
  703. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  704. { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
  705. { .name = "eventfd2", .errmsg = true,
  706. .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
  707. { .name = "faccessat", .errmsg = true,
  708. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  709. [1] = SCA_FILENAME, /* filename */ }, },
  710. { .name = "fadvise64", .errmsg = true,
  711. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  712. { .name = "fallocate", .errmsg = true,
  713. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  714. { .name = "fchdir", .errmsg = true,
  715. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  716. { .name = "fchmod", .errmsg = true,
  717. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  718. { .name = "fchmodat", .errmsg = true,
  719. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
  720. [1] = SCA_FILENAME, /* filename */ }, },
  721. { .name = "fchown", .errmsg = true,
  722. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  723. { .name = "fchownat", .errmsg = true,
  724. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
  725. [1] = SCA_FILENAME, /* filename */ }, },
  726. { .name = "fcntl", .errmsg = true,
  727. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  728. [1] = SCA_STRARRAY, /* cmd */ },
  729. .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
  730. { .name = "fdatasync", .errmsg = true,
  731. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  732. { .name = "flock", .errmsg = true,
  733. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  734. [1] = SCA_FLOCK, /* cmd */ }, },
  735. { .name = "fsetxattr", .errmsg = true,
  736. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  737. { .name = "fstat", .errmsg = true, .alias = "newfstat",
  738. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  739. { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
  740. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  741. [1] = SCA_FILENAME, /* filename */ }, },
  742. { .name = "fstatfs", .errmsg = true,
  743. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  744. { .name = "fsync", .errmsg = true,
  745. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  746. { .name = "ftruncate", .errmsg = true,
  747. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  748. { .name = "futex", .errmsg = true,
  749. .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
  750. { .name = "futimesat", .errmsg = true,
  751. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
  752. [1] = SCA_FILENAME, /* filename */ }, },
  753. { .name = "getdents", .errmsg = true,
  754. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  755. { .name = "getdents64", .errmsg = true,
  756. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  757. { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
  758. { .name = "getpid", .errpid = true, },
  759. { .name = "getpgid", .errpid = true, },
  760. { .name = "getppid", .errpid = true, },
  761. { .name = "getrandom", .errmsg = true,
  762. .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
  763. { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
  764. { .name = "getxattr", .errmsg = true,
  765. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  766. { .name = "inotify_add_watch", .errmsg = true,
  767. .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
  768. { .name = "ioctl", .errmsg = true,
  769. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  770. #if defined(__i386__) || defined(__x86_64__)
  771. /*
  772. * FIXME: Make this available to all arches.
  773. */
  774. [1] = SCA_STRHEXARRAY, /* cmd */
  775. [2] = SCA_HEX, /* arg */ },
  776. .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
  777. #else
  778. [2] = SCA_HEX, /* arg */ }, },
  779. #endif
  780. { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
  781. { .name = "kill", .errmsg = true,
  782. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  783. { .name = "lchown", .errmsg = true,
  784. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  785. { .name = "lgetxattr", .errmsg = true,
  786. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  787. { .name = "linkat", .errmsg = true,
  788. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  789. { .name = "listxattr", .errmsg = true,
  790. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  791. { .name = "llistxattr", .errmsg = true,
  792. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  793. { .name = "lremovexattr", .errmsg = true,
  794. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  795. { .name = "lseek", .errmsg = true,
  796. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  797. [2] = SCA_STRARRAY, /* whence */ },
  798. .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
  799. { .name = "lsetxattr", .errmsg = true,
  800. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  801. { .name = "lstat", .errmsg = true, .alias = "newlstat",
  802. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  803. { .name = "lsxattr", .errmsg = true,
  804. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  805. { .name = "madvise", .errmsg = true,
  806. .arg_scnprintf = { [0] = SCA_HEX, /* start */
  807. [2] = SCA_MADV_BHV, /* behavior */ }, },
  808. { .name = "mkdir", .errmsg = true,
  809. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  810. { .name = "mkdirat", .errmsg = true,
  811. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
  812. [1] = SCA_FILENAME, /* pathname */ }, },
  813. { .name = "mknod", .errmsg = true,
  814. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  815. { .name = "mknodat", .errmsg = true,
  816. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
  817. [1] = SCA_FILENAME, /* filename */ }, },
  818. { .name = "mlock", .errmsg = true,
  819. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  820. { .name = "mlockall", .errmsg = true,
  821. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  822. { .name = "mmap", .hexret = true,
  823. .arg_scnprintf = { [0] = SCA_HEX, /* addr */
  824. [2] = SCA_MMAP_PROT, /* prot */
  825. [3] = SCA_MMAP_FLAGS, /* flags */
  826. [4] = SCA_FD, /* fd */ }, },
  827. { .name = "mprotect", .errmsg = true,
  828. .arg_scnprintf = { [0] = SCA_HEX, /* start */
  829. [2] = SCA_MMAP_PROT, /* prot */ }, },
  830. { .name = "mq_unlink", .errmsg = true,
  831. .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
  832. { .name = "mremap", .hexret = true,
  833. .arg_scnprintf = { [0] = SCA_HEX, /* addr */
  834. [3] = SCA_MREMAP_FLAGS, /* flags */
  835. [4] = SCA_HEX, /* new_addr */ }, },
  836. { .name = "munlock", .errmsg = true,
  837. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  838. { .name = "munmap", .errmsg = true,
  839. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  840. { .name = "name_to_handle_at", .errmsg = true,
  841. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  842. { .name = "newfstatat", .errmsg = true,
  843. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  844. [1] = SCA_FILENAME, /* filename */ }, },
  845. { .name = "open", .errmsg = true,
  846. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
  847. [1] = SCA_OPEN_FLAGS, /* flags */ }, },
  848. { .name = "open_by_handle_at", .errmsg = true,
  849. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  850. [2] = SCA_OPEN_FLAGS, /* flags */ }, },
  851. { .name = "openat", .errmsg = true,
  852. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  853. [1] = SCA_FILENAME, /* filename */
  854. [2] = SCA_OPEN_FLAGS, /* flags */ }, },
  855. { .name = "perf_event_open", .errmsg = true,
  856. .arg_scnprintf = { [2] = SCA_INT, /* cpu */
  857. [3] = SCA_FD, /* group_fd */
  858. [4] = SCA_PERF_FLAGS, /* flags */ }, },
  859. { .name = "pipe2", .errmsg = true,
  860. .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
  861. { .name = "poll", .errmsg = true, .timeout = true, },
  862. { .name = "ppoll", .errmsg = true, .timeout = true, },
  863. { .name = "pread", .errmsg = true, .alias = "pread64",
  864. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  865. { .name = "preadv", .errmsg = true, .alias = "pread",
  866. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  867. { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
  868. { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
  869. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  870. { .name = "pwritev", .errmsg = true,
  871. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  872. { .name = "read", .errmsg = true,
  873. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  874. { .name = "readlink", .errmsg = true,
  875. .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
  876. { .name = "readlinkat", .errmsg = true,
  877. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  878. [1] = SCA_FILENAME, /* pathname */ }, },
  879. { .name = "readv", .errmsg = true,
  880. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  881. { .name = "recvfrom", .errmsg = true,
  882. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  883. [3] = SCA_MSG_FLAGS, /* flags */ }, },
  884. { .name = "recvmmsg", .errmsg = true,
  885. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  886. [3] = SCA_MSG_FLAGS, /* flags */ }, },
  887. { .name = "recvmsg", .errmsg = true,
  888. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  889. [2] = SCA_MSG_FLAGS, /* flags */ }, },
  890. { .name = "removexattr", .errmsg = true,
  891. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  892. { .name = "renameat", .errmsg = true,
  893. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  894. { .name = "rmdir", .errmsg = true,
  895. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  896. { .name = "rt_sigaction", .errmsg = true,
  897. .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
  898. { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
  899. { .name = "rt_sigqueueinfo", .errmsg = true,
  900. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  901. { .name = "rt_tgsigqueueinfo", .errmsg = true,
  902. .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
  903. { .name = "sched_setscheduler", .errmsg = true,
  904. .arg_scnprintf = { [1] = SCA_SCHED_POLICY, /* policy */ }, },
  905. { .name = "seccomp", .errmsg = true,
  906. .arg_scnprintf = { [0] = SCA_SECCOMP_OP, /* op */
  907. [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
  908. { .name = "select", .errmsg = true, .timeout = true, },
  909. { .name = "sendmmsg", .errmsg = true,
  910. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  911. [3] = SCA_MSG_FLAGS, /* flags */ }, },
  912. { .name = "sendmsg", .errmsg = true,
  913. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  914. [2] = SCA_MSG_FLAGS, /* flags */ }, },
  915. { .name = "sendto", .errmsg = true,
  916. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  917. [3] = SCA_MSG_FLAGS, /* flags */ }, },
  918. { .name = "set_tid_address", .errpid = true, },
  919. { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
  920. { .name = "setpgid", .errmsg = true, },
  921. { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
  922. { .name = "setxattr", .errmsg = true,
  923. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  924. { .name = "shutdown", .errmsg = true,
  925. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  926. { .name = "socket", .errmsg = true,
  927. .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
  928. [1] = SCA_SK_TYPE, /* type */ },
  929. .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
  930. { .name = "socketpair", .errmsg = true,
  931. .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
  932. [1] = SCA_SK_TYPE, /* type */ },
  933. .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
  934. { .name = "stat", .errmsg = true, .alias = "newstat",
  935. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  936. { .name = "statfs", .errmsg = true,
  937. .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
  938. { .name = "swapoff", .errmsg = true,
  939. .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
  940. { .name = "swapon", .errmsg = true,
  941. .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
  942. { .name = "symlinkat", .errmsg = true,
  943. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  944. { .name = "tgkill", .errmsg = true,
  945. .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
  946. { .name = "tkill", .errmsg = true,
  947. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  948. { .name = "truncate", .errmsg = true,
  949. .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
  950. { .name = "uname", .errmsg = true, .alias = "newuname", },
  951. { .name = "unlinkat", .errmsg = true,
  952. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  953. [1] = SCA_FILENAME, /* pathname */ }, },
  954. { .name = "utime", .errmsg = true,
  955. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  956. { .name = "utimensat", .errmsg = true,
  957. .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
  958. [1] = SCA_FILENAME, /* filename */ }, },
  959. { .name = "utimes", .errmsg = true,
  960. .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
  961. { .name = "vmsplice", .errmsg = true,
  962. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  963. { .name = "wait4", .errpid = true,
  964. .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
  965. { .name = "waitid", .errpid = true,
  966. .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
  967. { .name = "write", .errmsg = true,
  968. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  969. { .name = "writev", .errmsg = true,
  970. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  971. };
  972. static int syscall_fmt__cmp(const void *name, const void *fmtp)
  973. {
  974. const struct syscall_fmt *fmt = fmtp;
  975. return strcmp(name, fmt->name);
  976. }
  977. static struct syscall_fmt *syscall_fmt__find(const char *name)
  978. {
  979. const int nmemb = ARRAY_SIZE(syscall_fmts);
  980. return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
  981. }
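/*
 * Note: syscall_fmt__find() uses bsearch(), so syscall_fmts[] above must be
 * kept sorted by ->name; an entry added out of alphabetical order would make
 * lookups over part of the table silently fail.
 */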
  982. struct syscall {
  983. struct event_format *tp_format;
  984. int nr_args;
  985. struct format_field *args;
  986. const char *name;
  987. bool is_exit;
  988. struct syscall_fmt *fmt;
  989. size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
  990. void **arg_parm;
  991. };
  992. static size_t fprintf_duration(unsigned long t, FILE *fp)
  993. {
  994. double duration = (double)t / NSEC_PER_MSEC;
  995. size_t printed = fprintf(fp, "(");
  996. if (duration >= 1.0)
  997. printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
  998. else if (duration >= 0.01)
  999. printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
  1000. else
  1001. printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
  1002. return printed + fprintf(fp, "): ");
  1003. }
  1004. /**
  1005. * filename.ptr: The filename char pointer that will be vfs_getname'd
  1006. * filename.entry_str_pos: Where to insert the string translated from
  1007. * filename.ptr by the vfs_getname tracepoint/kprobe.
  1008. */
  1009. struct thread_trace {
  1010. u64 entry_time;
  1011. u64 exit_time;
  1012. bool entry_pending;
  1013. unsigned long nr_events;
  1014. unsigned long pfmaj, pfmin;
  1015. char *entry_str;
  1016. double runtime_ms;
  1017. struct {
  1018. unsigned long ptr;
  1019. short int entry_str_pos;
  1020. bool pending_open;
  1021. unsigned int namelen;
  1022. char *name;
  1023. } filename;
  1024. struct {
  1025. int max;
  1026. char **table;
  1027. } paths;
  1028. struct intlist *syscall_stats;
  1029. };
  1030. static struct thread_trace *thread_trace__new(void)
  1031. {
  1032. struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
  1033. if (ttrace) {
  1034. ttrace->paths.max = -1;
  1035. ttrace->syscall_stats = intlist__new(NULL);
  }
  1036. return ttrace;
  1037. }
  1038. static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
  1039. {
  1040. struct thread_trace *ttrace;
  1041. if (thread == NULL)
  1042. goto fail;
  1043. if (thread__priv(thread) == NULL)
  1044. thread__set_priv(thread, thread_trace__new());
  1045. if (thread__priv(thread) == NULL)
  1046. goto fail;
  1047. ttrace = thread__priv(thread);
  1048. ++ttrace->nr_events;
  1049. return ttrace;
  1050. fail:
  1051. color_fprintf(fp, PERF_COLOR_RED,
  1052. "WARNING: not enough memory, dropping samples!\n");
  1053. return NULL;
  1054. }
  1055. #define TRACE_PFMAJ (1 << 0)
  1056. #define TRACE_PFMIN (1 << 1)
  1057. static const size_t trace__entry_str_size = 2048;
  1058. static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
  1059. {
  1060. struct thread_trace *ttrace = thread__priv(thread);
  1061. if (fd > ttrace->paths.max) {
  1062. char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
  1063. if (npath == NULL)
  1064. return -1;
  1065. if (ttrace->paths.max != -1) {
  1066. memset(npath + ttrace->paths.max + 1, 0,
  1067. (fd - ttrace->paths.max) * sizeof(char *));
  1068. } else {
  1069. memset(npath, 0, (fd + 1) * sizeof(char *));
  1070. }
  1071. ttrace->paths.table = npath;
  1072. ttrace->paths.max = fd;
  1073. }
  1074. ttrace->paths.table[fd] = strdup(pathname);
  1075. return ttrace->paths.table[fd] != NULL ? 0 : -1;
  1076. }
  1077. static int thread__read_fd_path(struct thread *thread, int fd)
  1078. {
  1079. char linkname[PATH_MAX], pathname[PATH_MAX];
  1080. struct stat st;
  1081. int ret;
  1082. if (thread->pid_ == thread->tid) {
  1083. scnprintf(linkname, sizeof(linkname),
  1084. "/proc/%d/fd/%d", thread->pid_, fd);
  1085. } else {
  1086. scnprintf(linkname, sizeof(linkname),
  1087. "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
  1088. }
  1089. if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
  1090. return -1;
  1091. ret = readlink(linkname, pathname, sizeof(pathname));
  1092. if (ret < 0 || ret > st.st_size)
  1093. return -1;
  1094. pathname[ret] = '\0';
  1095. return trace__set_fd_pathname(thread, fd, pathname);
  1096. }
  1097. static const char *thread__fd_path(struct thread *thread, int fd,
  1098. struct trace *trace)
  1099. {
  1100. struct thread_trace *ttrace = thread__priv(thread);
  1101. if (ttrace == NULL)
  1102. return NULL;
  1103. if (fd < 0)
  1104. return NULL;
  1105. if (fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL) {
  1106. if (!trace->live)
  1107. return NULL;
  1108. ++trace->stats.proc_getname;
  1109. if (thread__read_fd_path(thread, fd))
  1110. return NULL;
  1111. }
  1112. return ttrace->paths.table[fd];
  1113. }
  1114. static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
  1115. struct syscall_arg *arg)
  1116. {
  1117. int fd = arg->val;
  1118. size_t printed = scnprintf(bf, size, "%d", fd);
  1119. const char *path = thread__fd_path(arg->thread, fd, arg->trace);
  1120. if (path)
  1121. printed += scnprintf(bf + printed, size - printed, "<%s>", path);
  1122. return printed;
  1123. }
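/* close(fd): print it like any other fd, then drop its cached pathname. */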
  1124. static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
  1125. struct syscall_arg *arg)
  1126. {
  1127. int fd = arg->val;
  1128. size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
  1129. struct thread_trace *ttrace = thread__priv(arg->thread);
  1130. if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
  1131. zfree(&ttrace->paths.table[fd]);
  1132. return printed;
  1133. }
  1134. static void thread__set_filename_pos(struct thread *thread, const char *bf,
  1135. unsigned long ptr)
  1136. {
  1137. struct thread_trace *ttrace = thread__priv(thread);
  1138. ttrace->filename.ptr = ptr;
  1139. ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
  1140. }
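/*
 * When probe:vfs_getname is in use, print nothing now and just remember
 * where in entry_str the resolved pathname should be spliced in later.
 */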
  1141. static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
  1142. struct syscall_arg *arg)
  1143. {
  1144. unsigned long ptr = arg->val;
  1145. if (!arg->trace->vfs_getname)
  1146. return scnprintf(bf, size, "%#lx", ptr);
  1147. thread__set_filename_pos(arg->thread, bf, ptr);
  1148. return 0;
  1149. }
  1150. static bool trace__filter_duration(struct trace *trace, double t)
  1151. {
  1152. return t < (trace->duration_filter * NSEC_PER_MSEC);
  1153. }
  1154. static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
  1155. {
  1156. double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
  1157. return fprintf(fp, "%10.3f ", ts);
  1158. }
  1159. static bool done = false;
  1160. static bool interrupted = false;
  1161. static void sig_handler(int sig)
  1162. {
  1163. done = true;
  1164. interrupted = sig == SIGINT;
  1165. }
  1166. static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
  1167. u64 duration, u64 tstamp, FILE *fp)
  1168. {
  1169. size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
  1170. printed += fprintf_duration(duration, fp);
  1171. if (trace->multiple_threads) {
  1172. if (trace->show_comm)
  1173. printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
  1174. printed += fprintf(fp, "%d ", thread->tid);
  1175. }
  1176. return printed;
  1177. }
  1178. static int trace__process_event(struct trace *trace, struct machine *machine,
  1179. union perf_event *event, struct perf_sample *sample)
  1180. {
  1181. int ret = 0;
  1182. switch (event->header.type) {
  1183. case PERF_RECORD_LOST:
  1184. color_fprintf(trace->output, PERF_COLOR_RED,
  1185. "LOST %" PRIu64 " events!\n", event->lost.lost);
  1186. ret = machine__process_lost_event(machine, event, sample);
  1187. break;
  1188. default:
  1189. ret = machine__process_event(machine, event, sample);
  1190. break;
  1191. }
  1192. return ret;
  1193. }
  1194. static int trace__tool_process(struct perf_tool *tool,
  1195. union perf_event *event,
  1196. struct perf_sample *sample,
  1197. struct machine *machine)
  1198. {
  1199. struct trace *trace = container_of(tool, struct trace, tool);
  1200. return trace__process_event(trace, machine, event, sample);
  1201. }
  1202. static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
  1203. {
  1204. int err = symbol__init(NULL);
  1205. if (err)
  1206. return err;
  1207. trace->host = machine__new_host();
  1208. if (trace->host == NULL)
  1209. return -ENOMEM;
  1210. if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
  1211. return -errno;
  1212. err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
  1213. evlist->threads, trace__tool_process, false,
  1214. trace->opts.proc_map_timeout);
  1215. if (err)
  1216. symbol__exit();
  1217. return err;
  1218. }
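/*
 * Pick a pretty printer for each syscall argument: honour any per-syscall
 * formatter, then fall back to hex for pointers and to the pid_t/umode_t
 * helpers.
 */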
  1219. static int syscall__set_arg_fmts(struct syscall *sc)
  1220. {
  1221. struct format_field *field;
  1222. int idx = 0;
  1223. sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
  1224. if (sc->arg_scnprintf == NULL)
  1225. return -1;
  1226. if (sc->fmt)
  1227. sc->arg_parm = sc->fmt->arg_parm;
  1228. for (field = sc->args; field; field = field->next) {
  1229. if (sc->fmt && sc->fmt->arg_scnprintf[idx])
  1230. sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
  1231. else if (field->flags & FIELD_IS_POINTER)
  1232. sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
  1233. else if (strcmp(field->type, "pid_t") == 0)
  1234. sc->arg_scnprintf[idx] = SCA_PID;
  1235. else if (strcmp(field->type, "umode_t") == 0)
  1236. sc->arg_scnprintf[idx] = SCA_MODE_T;
  1237. ++idx;
  1238. }
  1239. return 0;
  1240. }
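/*
 * Lazily fill in the syscalls table entry for 'id': resolve its name, its
 * tracepoint format and the per-argument pretty printers.
 */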
  1241. static int trace__read_syscall_info(struct trace *trace, int id)
  1242. {
  1243. char tp_name[128];
  1244. struct syscall *sc;
  1245. const char *name = syscalltbl__name(trace->sctbl, id);
  1246. if (name == NULL)
  1247. return -1;
  1248. if (id > trace->syscalls.max) {
  1249. struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
  1250. if (nsyscalls == NULL)
  1251. return -1;
  1252. if (trace->syscalls.max != -1) {
  1253. memset(nsyscalls + trace->syscalls.max + 1, 0,
  1254. (id - trace->syscalls.max) * sizeof(*sc));
  1255. } else {
  1256. memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
  1257. }
  1258. trace->syscalls.table = nsyscalls;
  1259. trace->syscalls.max = id;
  1260. }
  1261. sc = trace->syscalls.table + id;
  1262. sc->name = name;
  1263. sc->fmt = syscall_fmt__find(sc->name);
  1264. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
  1265. sc->tp_format = trace_event__tp_format("syscalls", tp_name);
  1266. if (IS_ERR(sc->tp_format) && sc->fmt && sc->fmt->alias) {
  1267. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
  1268. sc->tp_format = trace_event__tp_format("syscalls", tp_name);
  1269. }
  1270. if (IS_ERR(sc->tp_format))
  1271. return -1;
  1272. sc->args = sc->tp_format->format.fields;
  1273. sc->nr_args = sc->tp_format->format.nr_fields;
  1274. /*
  1275. * The first field is the syscall number ('__syscall_nr', or 'nr' on
  1276. * older kernels); it is redundant here, so drop it when present.
  1277. * Note that on older kernels the field may not exist at all.
  1278. */
  1279. if (sc->args && (!strcmp(sc->args->name, "__syscall_nr") || !strcmp(sc->args->name, "nr"))) {
  1280. sc->args = sc->args->next;
  1281. --sc->nr_args;
  1282. }
  1283. sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
  1284. return syscall__set_arg_fmts(sc);
  1285. }
  1286. static int trace__validate_ev_qualifier(struct trace *trace)
  1287. {
  1288. int err = 0, i;
  1289. struct str_node *pos;
  1290. trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
  1291. trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
  1292. sizeof(trace->ev_qualifier_ids.entries[0]));
  1293. if (trace->ev_qualifier_ids.entries == NULL) {
  1294. fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
  1295. trace->output);
  1296. err = -EINVAL;
  1297. goto out;
  1298. }
  1299. i = 0;
  1300. strlist__for_each(pos, trace->ev_qualifier) {
  1301. const char *sc = pos->s;
  1302. int id = syscalltbl__id(trace->sctbl, sc);
  1303. if (id < 0) {
  1304. if (err == 0) {
  1305. fputs("Error:\tInvalid syscall ", trace->output);
  1306. err = -EINVAL;
  1307. } else {
  1308. fputs(", ", trace->output);
  1309. }
  1310. fputs(sc, trace->output);
  1311. }
  1312. trace->ev_qualifier_ids.entries[i++] = id;
  1313. }
  1314. if (err < 0) {
  1315. fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
  1316. "\nHint:\tand: 'man syscalls'\n", trace->output);
  1317. zfree(&trace->ev_qualifier_ids.entries);
  1318. trace->ev_qualifier_ids.nr = 0;
  1319. }
  1320. out:
  1321. return err;
  1322. }
  1323. /*
  1324. * args is to be interpreted as a sequence of longs, but we need to handle
  1325. * 8-byte unaligned accesses: args points to raw_data within the event,
  1326. * and raw_data is never 8-byte aligned because it is preceded by
  1327. * raw_size, which is a u32. So copy args into a temporary variable
  1328. * before reading it, most notably to avoid extended load instructions
  1329. * on unaligned addresses.
  1330. */
  1331. static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
  1332. unsigned char *args, struct trace *trace,
  1333. struct thread *thread)
  1334. {
  1335. size_t printed = 0;
  1336. unsigned char *p;
  1337. unsigned long val;
  1338. if (sc->args != NULL) {
  1339. struct format_field *field;
  1340. u8 bit = 1;
  1341. struct syscall_arg arg = {
  1342. .idx = 0,
  1343. .mask = 0,
  1344. .trace = trace,
  1345. .thread = thread,
  1346. };
  1347. for (field = sc->args; field;
  1348. field = field->next, ++arg.idx, bit <<= 1) {
  1349. if (arg.mask & bit)
  1350. continue;
  1351. /* special care for unaligned accesses */
  1352. p = args + sizeof(unsigned long) * arg.idx;
  1353. memcpy(&val, p, sizeof(val));
  1354. /*
  1355. * Suppress this argument if its value is zero and
  1356. * we don't have a string associated with it in a
  1357. * strarray.
  1358. */
  1359. if (val == 0 &&
  1360. !(sc->arg_scnprintf &&
  1361. sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
  1362. sc->arg_parm[arg.idx]))
  1363. continue;
  1364. printed += scnprintf(bf + printed, size - printed,
  1365. "%s%s: ", printed ? ", " : "", field->name);
  1366. if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
  1367. arg.val = val;
  1368. if (sc->arg_parm)
  1369. arg.parm = sc->arg_parm[arg.idx];
  1370. printed += sc->arg_scnprintf[arg.idx](bf + printed,
  1371. size - printed, &arg);
  1372. } else {
  1373. printed += scnprintf(bf + printed, size - printed,
  1374. "%ld", val);
  1375. }
  1376. }
  1377. } else if (IS_ERR(sc->tp_format)) {
  1378. /*
  1379. * If we managed to read the tracepoint /format file, then we
  1380. * may end up not having any args, like with gettid(), so only
  1381. * print the raw args when we didn't manage to read it.
  1382. */
  1383. int i = 0;
  1384. while (i < 6) {
  1385. /* special care for unaligned accesses */
  1386. p = args + sizeof(unsigned long) * i;
  1387. memcpy(&val, p, sizeof(val));
  1388. printed += scnprintf(bf + printed, size - printed,
  1389. "%sarg%d: %ld",
  1390. printed ? ", " : "", i, val);
  1391. ++i;
  1392. }
  1393. }
  1394. return printed;
  1395. }
  1396. typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
  1397. union perf_event *event,
  1398. struct perf_sample *sample);
  1399. static struct syscall *trace__syscall_info(struct trace *trace,
  1400. struct perf_evsel *evsel, int id)
  1401. {
  1402. if (id < 0) {
  1403. /*
  1404. * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
  1405. * before that, leaving at a higher verbosity level till that is
  1406. * explained. Reproduced with plain ftrace with:
  1407. *
  1408. * echo 1 > /t/events/raw_syscalls/sys_exit/enable
  1409. * grep "NR -1 " /t/trace_pipe
  1410. *
  1411. * After generating some load on the machine.
  1412. */
  1413. if (verbose > 1) {
  1414. static u64 n;
  1415. fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
  1416. id, perf_evsel__name(evsel), ++n);
  1417. }
  1418. return NULL;
  1419. }
  1420. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
  1421. trace__read_syscall_info(trace, id))
  1422. goto out_cant_read;
  1423. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
  1424. goto out_cant_read;
  1425. return &trace->syscalls.table[id];
  1426. out_cant_read:
  1427. if (verbose) {
  1428. fprintf(trace->output, "Problems reading syscall %d", id);
  1429. if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
  1430. fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
  1431. fputs(" information\n", trace->output);
  1432. }
  1433. return NULL;
  1434. }
  1435. static void thread__update_stats(struct thread_trace *ttrace,
  1436. int id, struct perf_sample *sample)
  1437. {
  1438. struct int_node *inode;
  1439. struct stats *stats;
  1440. u64 duration = 0;
  1441. inode = intlist__findnew(ttrace->syscall_stats, id);
  1442. if (inode == NULL)
  1443. return;
  1444. stats = inode->priv;
  1445. if (stats == NULL) {
  1446. stats = malloc(sizeof(struct stats));
  1447. if (stats == NULL)
  1448. return;
  1449. init_stats(stats);
  1450. inode->priv = stats;
  1451. }
  1452. if (ttrace->entry_time && sample->time > ttrace->entry_time)
  1453. duration = sample->time - ttrace->entry_time;
  1454. update_stats(stats, duration);
  1455. }
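/*
 * An event is about to be printed while a syscall entry is still pending
 * on the current thread: flush that entry, marking it as unfinished with
 * "...".
 */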
  1456. static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
  1457. {
  1458. struct thread_trace *ttrace;
  1459. u64 duration;
  1460. size_t printed;
  1461. if (trace->current == NULL)
  1462. return 0;
  1463. ttrace = thread__priv(trace->current);
  1464. if (!ttrace->entry_pending)
  1465. return 0;
  1466. duration = sample->time - ttrace->entry_time;
  1467. printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
  1468. printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
  1469. ttrace->entry_pending = false;
  1470. return printed;
  1471. }
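/*
 * raw_syscalls:sys_enter handler: format the syscall name and arguments
 * into the per-thread entry_str; syscalls that never return are printed
 * right away, everything else waits for the matching sys_exit.
 */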
  1472. static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
  1473. union perf_event *event __maybe_unused,
  1474. struct perf_sample *sample)
  1475. {
  1476. char *msg;
  1477. void *args;
  1478. size_t printed = 0;
  1479. struct thread *thread;
  1480. int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
  1481. struct syscall *sc = trace__syscall_info(trace, evsel, id);
  1482. struct thread_trace *ttrace;
  1483. if (sc == NULL)
  1484. return -1;
  1485. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1486. ttrace = thread__trace(thread, trace->output);
  1487. if (ttrace == NULL)
  1488. goto out_put;
  1489. args = perf_evsel__sc_tp_ptr(evsel, args, sample);
  1490. if (ttrace->entry_str == NULL) {
  1491. ttrace->entry_str = malloc(trace__entry_str_size);
  1492. if (!ttrace->entry_str)
  1493. goto out_put;
  1494. }
  1495. if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
  1496. trace__printf_interrupted_entry(trace, sample);
  1497. ttrace->entry_time = sample->time;
  1498. msg = ttrace->entry_str;
  1499. printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
  1500. printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
  1501. args, trace, thread);
  1502. if (sc->is_exit) {
  1503. if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
  1504. trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
  1505. fprintf(trace->output, "%-70s\n", ttrace->entry_str);
  1506. }
  1507. } else {
  1508. ttrace->entry_pending = true;
  1509. /* See trace__vfs_getname & trace__sys_exit */
  1510. ttrace->filename.pending_open = false;
  1511. }
  1512. if (trace->current != thread) {
  1513. thread__put(trace->current);
  1514. trace->current = thread__get(thread);
  1515. }
  1516. err = 0;
  1517. out_put:
  1518. thread__put(thread);
  1519. return err;
  1520. }
  1521. static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evsel,
  1522. struct perf_sample *sample,
  1523. struct callchain_cursor *cursor)
  1524. {
  1525. struct addr_location al;
  1526. if (machine__resolve(trace->host, &al, sample) < 0 ||
  1527. thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
  1528. return -1;
  1529. return 0;
  1530. }
  1531. static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
  1532. {
  1533. /* TODO: user-configurable print_opts */
  1534. const unsigned int print_opts = EVSEL__PRINT_SYM |
  1535. EVSEL__PRINT_DSO |
  1536. EVSEL__PRINT_UNKNOWN_AS_ADDR;
  1537. return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
  1538. }
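/*
 * raw_syscalls:sys_exit handler: compute the duration, apply the duration
 * filter, then print the pending entry (or a "continued" marker), the
 * formatted return value and, if requested, the callchain.
 */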
  1539. static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
  1540. union perf_event *event __maybe_unused,
  1541. struct perf_sample *sample)
  1542. {
  1543. long ret;
  1544. u64 duration = 0;
  1545. struct thread *thread;
  1546. int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
  1547. struct syscall *sc = trace__syscall_info(trace, evsel, id);
  1548. struct thread_trace *ttrace;
  1549. if (sc == NULL)
  1550. return -1;
  1551. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1552. ttrace = thread__trace(thread, trace->output);
  1553. if (ttrace == NULL)
  1554. goto out_put;
  1555. if (trace->summary)
  1556. thread__update_stats(ttrace, id, sample);
  1557. ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
  1558. if (id == trace->open_id && ret >= 0 && ttrace->filename.pending_open) {
  1559. trace__set_fd_pathname(thread, ret, ttrace->filename.name);
  1560. ttrace->filename.pending_open = false;
  1561. ++trace->stats.vfs_getname;
  1562. }
  1563. ttrace->exit_time = sample->time;
  1564. if (ttrace->entry_time) {
  1565. duration = sample->time - ttrace->entry_time;
  1566. if (trace__filter_duration(trace, duration))
  1567. goto out;
  1568. } else if (trace->duration_filter)
  1569. goto out;
  1570. if (sample->callchain) {
  1571. callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
  1572. if (callchain_ret == 0) {
  1573. if (callchain_cursor.nr < trace->min_stack)
  1574. goto out;
  1575. callchain_ret = 1;
  1576. }
  1577. }
  1578. if (trace->summary_only)
  1579. goto out;
  1580. trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
  1581. if (ttrace->entry_pending) {
  1582. fprintf(trace->output, "%-70s", ttrace->entry_str);
  1583. } else {
  1584. fprintf(trace->output, " ... [");
  1585. color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
  1586. fprintf(trace->output, "]: %s()", sc->name);
  1587. }
  1588. if (sc->fmt == NULL) {
  1589. signed_print:
  1590. fprintf(trace->output, ") = %ld", ret);
  1591. } else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
  1592. char bf[STRERR_BUFSIZE];
  1593. const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
  1594. *e = audit_errno_to_name(-ret);
  1595. fprintf(trace->output, ") = -1 %s %s", e, emsg);
  1596. } else if (ret == 0 && sc->fmt->timeout)
  1597. fprintf(trace->output, ") = 0 Timeout");
  1598. else if (sc->fmt->hexret)
  1599. fprintf(trace->output, ") = %#lx", ret);
  1600. else if (sc->fmt->errpid) {
  1601. struct thread *child = machine__find_thread(trace->host, ret, ret);
  1602. if (child != NULL) {
  1603. fprintf(trace->output, ") = %ld", ret);
  1604. if (child->comm_set)
  1605. fprintf(trace->output, " (%s)", thread__comm_str(child));
  1606. thread__put(child);
  1607. }
  1608. } else
  1609. goto signed_print;
  1610. fputc('\n', trace->output);
  1611. if (callchain_ret > 0)
  1612. trace__fprintf_callchain(trace, sample);
  1613. else if (callchain_ret < 0)
  1614. pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
  1615. out:
  1616. ttrace->entry_pending = false;
  1617. err = 0;
  1618. out_put:
  1619. thread__put(thread);
  1620. return err;
  1621. }
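/*
 * probe:vfs_getname handler: remember the pathname for the pending open
 * and splice it into the entry string at the position recorded by
 * syscall_arg__scnprintf_filename().
 */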
  1622. static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
  1623. union perf_event *event __maybe_unused,
  1624. struct perf_sample *sample)
  1625. {
  1626. struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1627. struct thread_trace *ttrace;
  1628. size_t filename_len, entry_str_len, to_move;
  1629. ssize_t remaining_space;
  1630. char *pos;
  1631. const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
  1632. if (!thread)
  1633. goto out;
  1634. ttrace = thread__priv(thread);
  1635. if (!ttrace)
  1636. goto out;
  1637. filename_len = strlen(filename);
  1638. if (ttrace->filename.namelen < filename_len) {
  1639. char *f = realloc(ttrace->filename.name, filename_len + 1);
  1640. if (f == NULL)
  1641. goto out;
  1642. ttrace->filename.namelen = filename_len;
  1643. ttrace->filename.name = f;
  1644. }
  1645. strcpy(ttrace->filename.name, filename);
  1646. ttrace->filename.pending_open = true;
  1647. if (!ttrace->filename.ptr)
  1648. goto out;
  1649. entry_str_len = strlen(ttrace->entry_str);
  1650. remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
  1651. if (remaining_space <= 0)
  1652. goto out;
  1653. if (filename_len > (size_t)remaining_space) {
  1654. filename += filename_len - remaining_space;
  1655. filename_len = remaining_space;
  1656. }
  1657. to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
  1658. pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
  1659. memmove(pos + filename_len, pos, to_move);
  1660. memcpy(pos, filename, filename_len);
  1661. ttrace->filename.ptr = 0;
  1662. ttrace->filename.entry_str_pos = 0;
  1663. out:
  1664. return 0;
  1665. }
  1666. static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
  1667. union perf_event *event __maybe_unused,
  1668. struct perf_sample *sample)
  1669. {
  1670. u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
  1671. double runtime_ms = (double)runtime / NSEC_PER_MSEC;
  1672. struct thread *thread = machine__findnew_thread(trace->host,
  1673. sample->pid,
  1674. sample->tid);
  1675. struct thread_trace *ttrace = thread__trace(thread, trace->output);
  1676. if (ttrace == NULL)
  1677. goto out_dump;
  1678. ttrace->runtime_ms += runtime_ms;
  1679. trace->runtime_ms += runtime_ms;
  1680. thread__put(thread);
  1681. return 0;
  1682. out_dump:
  1683. fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
  1684. evsel->name,
  1685. perf_evsel__strval(evsel, sample, "comm"),
  1686. (pid_t)perf_evsel__intval(evsel, sample, "pid"),
  1687. runtime,
  1688. perf_evsel__intval(evsel, sample, "vruntime"));
  1689. thread__put(thread);
  1690. return 0;
  1691. }
  1692. static void bpf_output__printer(enum binary_printer_ops op,
  1693. unsigned int val, void *extra)
  1694. {
  1695. FILE *output = extra;
  1696. unsigned char ch = (unsigned char)val;
  1697. switch (op) {
  1698. case BINARY_PRINT_CHAR_DATA:
  1699. fprintf(output, "%c", isprint(ch) ? ch : '.');
  1700. break;
  1701. case BINARY_PRINT_DATA_BEGIN:
  1702. case BINARY_PRINT_LINE_BEGIN:
  1703. case BINARY_PRINT_ADDR:
  1704. case BINARY_PRINT_NUM_DATA:
  1705. case BINARY_PRINT_NUM_PAD:
  1706. case BINARY_PRINT_SEP:
  1707. case BINARY_PRINT_CHAR_PAD:
  1708. case BINARY_PRINT_LINE_END:
  1709. case BINARY_PRINT_DATA_END:
  1710. default:
  1711. break;
  1712. }
  1713. }
  1714. static void bpf_output__fprintf(struct trace *trace,
  1715. struct perf_sample *sample)
  1716. {
  1717. print_binary(sample->raw_data, sample->raw_size, 8,
  1718. bpf_output__printer, trace->output);
  1719. }
  1720. static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
  1721. union perf_event *event __maybe_unused,
  1722. struct perf_sample *sample)
  1723. {
  1724. int callchain_ret = 0;
  1725. if (sample->callchain) {
  1726. callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
  1727. if (callchain_ret == 0) {
  1728. if (callchain_cursor.nr < trace->min_stack)
  1729. goto out;
  1730. callchain_ret = 1;
  1731. }
  1732. }
  1733. trace__printf_interrupted_entry(trace, sample);
  1734. trace__fprintf_tstamp(trace, sample->time, trace->output);
  1735. if (trace->trace_syscalls)
  1736. fprintf(trace->output, "( ): ");
  1737. fprintf(trace->output, "%s:", evsel->name);
  1738. if (perf_evsel__is_bpf_output(evsel)) {
  1739. bpf_output__fprintf(trace, sample);
  1740. } else if (evsel->tp_format) {
  1741. event_format__fprintf(evsel->tp_format, sample->cpu,
  1742. sample->raw_data, sample->raw_size,
  1743. trace->output);
  1744. }
  1745. fprintf(trace->output, ")\n");
  1746. if (callchain_ret > 0)
  1747. trace__fprintf_callchain(trace, sample);
  1748. else if (callchain_ret < 0)
  1749. pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
  1750. out:
  1751. return 0;
  1752. }
  1753. static void print_location(FILE *f, struct perf_sample *sample,
  1754. struct addr_location *al,
  1755. bool print_dso, bool print_sym)
  1756. {
  1757. if ((verbose || print_dso) && al->map)
  1758. fprintf(f, "%s@", al->map->dso->long_name);
  1759. if ((verbose || print_sym) && al->sym)
  1760. fprintf(f, "%s+0x%" PRIx64, al->sym->name,
  1761. al->addr - al->sym->start);
  1762. else if (al->map)
  1763. fprintf(f, "0x%" PRIx64, al->addr);
  1764. else
  1765. fprintf(f, "0x%" PRIx64, sample->addr);
  1766. }
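/*
 * Page fault handler: account major/minor faults per thread and print the
 * faulting code location and the data address that was touched.
 */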
  1767. static int trace__pgfault(struct trace *trace,
  1768. struct perf_evsel *evsel,
  1769. union perf_event *event __maybe_unused,
  1770. struct perf_sample *sample)
  1771. {
  1772. struct thread *thread;
  1773. struct addr_location al;
  1774. char map_type = 'd';
  1775. struct thread_trace *ttrace;
  1776. int err = -1;
  1777. int callchain_ret = 0;
  1778. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1779. if (sample->callchain) {
  1780. callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
  1781. if (callchain_ret == 0) {
  1782. if (callchain_cursor.nr < trace->min_stack)
  1783. goto out_put;
  1784. callchain_ret = 1;
  1785. }
  1786. }
  1787. ttrace = thread__trace(thread, trace->output);
  1788. if (ttrace == NULL)
  1789. goto out_put;
  1790. if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
  1791. ttrace->pfmaj++;
  1792. else
  1793. ttrace->pfmin++;
  1794. if (trace->summary_only)
  1795. goto out;
  1796. thread__find_addr_location(thread, sample->cpumode, MAP__FUNCTION,
  1797. sample->ip, &al);
  1798. trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
  1799. fprintf(trace->output, "%sfault [",
  1800. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
  1801. "maj" : "min");
  1802. print_location(trace->output, sample, &al, false, true);
  1803. fprintf(trace->output, "] => ");
  1804. thread__find_addr_location(thread, sample->cpumode, MAP__VARIABLE,
  1805. sample->addr, &al);
  1806. if (!al.map) {
  1807. thread__find_addr_location(thread, sample->cpumode,
  1808. MAP__FUNCTION, sample->addr, &al);
  1809. if (al.map)
  1810. map_type = 'x';
  1811. else
  1812. map_type = '?';
  1813. }
  1814. print_location(trace->output, sample, &al, true, false);
  1815. fprintf(trace->output, " (%c%c)\n", map_type, al.level);
  1816. if (callchain_ret > 0)
  1817. trace__fprintf_callchain(trace, sample);
  1818. else if (callchain_ret < 0)
  1819. pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
  1820. out:
  1821. err = 0;
  1822. out_put:
  1823. thread__put(thread);
  1824. return err;
  1825. }
  1826. static bool skip_sample(struct trace *trace, struct perf_sample *sample)
  1827. {
  1828. if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
  1829. (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
  1830. return false;
  1831. if (trace->pid_list || trace->tid_list)
  1832. return true;
  1833. return false;
  1834. }
  1835. static void trace__set_base_time(struct trace *trace,
  1836. struct perf_evsel *evsel,
  1837. struct perf_sample *sample)
  1838. {
  1839. /*
  1840. * BPF events were not setting PERF_SAMPLE_TIME, so be more robust
  1841. * and don't use sample->time unconditionally, we may end up having
  1842. * some other event in the future without PERF_SAMPLE_TIME for good
  1843. * reason, i.e. we may not be interested in its timestamps, just in
  1844. * it taking place, picking some piece of information when it
  1845. * appears in our event stream (vfs_getname comes to mind).
  1846. */
  1847. if (trace->base_time == 0 && !trace->full_time &&
  1848. (evsel->attr.sample_type & PERF_SAMPLE_TIME))
  1849. trace->base_time = sample->time;
  1850. }
  1851. static int trace__process_sample(struct perf_tool *tool,
  1852. union perf_event *event,
  1853. struct perf_sample *sample,
  1854. struct perf_evsel *evsel,
  1855. struct machine *machine __maybe_unused)
  1856. {
  1857. struct trace *trace = container_of(tool, struct trace, tool);
  1858. int err = 0;
  1859. tracepoint_handler handler = evsel->handler;
  1860. if (skip_sample(trace, sample))
  1861. return 0;
  1862. trace__set_base_time(trace, evsel, sample);
  1863. if (handler) {
  1864. ++trace->nr_events;
  1865. handler(trace, evsel, event, sample);
  1866. }
  1867. return err;
  1868. }
  1869. static int parse_target_str(struct trace *trace)
  1870. {
  1871. if (trace->opts.target.pid) {
  1872. trace->pid_list = intlist__new(trace->opts.target.pid);
  1873. if (trace->pid_list == NULL) {
  1874. pr_err("Error parsing process id string\n");
  1875. return -EINVAL;
  1876. }
  1877. }
  1878. if (trace->opts.target.tid) {
  1879. trace->tid_list = intlist__new(trace->opts.target.tid);
  1880. if (trace->tid_list == NULL) {
  1881. pr_err("Error parsing thread id string\n");
  1882. return -EINVAL;
  1883. }
  1884. }
  1885. return 0;
  1886. }
  1887. static int trace__record(struct trace *trace, int argc, const char **argv)
  1888. {
  1889. unsigned int rec_argc, i, j;
  1890. const char **rec_argv;
  1891. const char * const record_args[] = {
  1892. "record",
  1893. "-R",
  1894. "-m", "1024",
  1895. "-c", "1",
  1896. };
  1897. const char * const sc_args[] = { "-e", };
  1898. unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
  1899. const char * const majpf_args[] = { "-e", "major-faults" };
  1900. unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
  1901. const char * const minpf_args[] = { "-e", "minor-faults" };
  1902. unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
  1903. /* +1 is for the event string below */
  1904. rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
  1905. majpf_args_nr + minpf_args_nr + argc;
  1906. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1907. if (rec_argv == NULL)
  1908. return -ENOMEM;
  1909. j = 0;
  1910. for (i = 0; i < ARRAY_SIZE(record_args); i++)
  1911. rec_argv[j++] = record_args[i];
  1912. if (trace->trace_syscalls) {
  1913. for (i = 0; i < sc_args_nr; i++)
  1914. rec_argv[j++] = sc_args[i];
  1915. /* event string may be different for older kernels - e.g., RHEL6 */
  1916. if (is_valid_tracepoint("raw_syscalls:sys_enter"))
  1917. rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
  1918. else if (is_valid_tracepoint("syscalls:sys_enter"))
  1919. rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
  1920. else {
  1921. pr_err("Neither raw_syscalls nor syscalls events exist.\n");
  1922. return -1;
  1923. }
  1924. }
  1925. if (trace->trace_pgfaults & TRACE_PFMAJ)
  1926. for (i = 0; i < majpf_args_nr; i++)
  1927. rec_argv[j++] = majpf_args[i];
  1928. if (trace->trace_pgfaults & TRACE_PFMIN)
  1929. for (i = 0; i < minpf_args_nr; i++)
  1930. rec_argv[j++] = minpf_args[i];
  1931. for (i = 0; i < (unsigned int)argc; i++)
  1932. rec_argv[j++] = argv[i];
  1933. return cmd_record(j, rec_argv, NULL);
  1934. }
  1935. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
  1936. static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
  1937. {
  1938. struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
  1939. if (IS_ERR(evsel))
  1940. return false;
  1941. if (perf_evsel__field(evsel, "pathname") == NULL) {
  1942. perf_evsel__delete(evsel);
  1943. return false;
  1944. }
  1945. evsel->handler = trace__vfs_getname;
  1946. perf_evlist__add(evlist, evsel);
  1947. return true;
  1948. }
  1949. static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
  1950. {
  1951. struct perf_evsel *evsel;
  1952. struct perf_event_attr attr = {
  1953. .type = PERF_TYPE_SOFTWARE,
  1954. .mmap_data = 1,
  1955. };
  1956. attr.config = config;
  1957. attr.sample_period = 1;
  1958. event_attr_init(&attr);
  1959. evsel = perf_evsel__new(&attr);
  1960. if (evsel)
  1961. evsel->handler = trace__pgfault;
  1962. return evsel;
  1963. }
  1964. static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
  1965. {
  1966. const u32 type = event->header.type;
  1967. struct perf_evsel *evsel;
  1968. if (type != PERF_RECORD_SAMPLE) {
  1969. trace__process_event(trace, trace->host, event, sample);
  1970. return;
  1971. }
  1972. evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
  1973. if (evsel == NULL) {
  1974. fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
  1975. return;
  1976. }
  1977. trace__set_base_time(trace, evsel, sample);
  1978. if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
  1979. sample->raw_data == NULL) {
  1980. fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
  1981. perf_evsel__name(evsel), sample->tid,
  1982. sample->cpu, sample->raw_size);
  1983. } else {
  1984. tracepoint_handler handler = evsel->handler;
  1985. handler(trace, evsel, event, sample);
  1986. }
  1987. }
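/* Create the raw_syscalls:sys_enter/sys_exit events and add them to the evlist. */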
  1988. static int trace__add_syscall_newtp(struct trace *trace)
  1989. {
  1990. int ret = -1;
  1991. struct perf_evlist *evlist = trace->evlist;
  1992. struct perf_evsel *sys_enter, *sys_exit;
  1993. sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
  1994. if (sys_enter == NULL)
  1995. goto out;
  1996. if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
  1997. goto out_delete_sys_enter;
  1998. sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
  1999. if (sys_exit == NULL)
  2000. goto out_delete_sys_enter;
  2001. if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
  2002. goto out_delete_sys_exit;
  2003. perf_evlist__add(evlist, sys_enter);
  2004. perf_evlist__add(evlist, sys_exit);
  2005. if (callchain_param.enabled && !trace->kernel_syscallchains) {
  2006. /*
  2007. * We're interested only in the user space callchain
  2008. * leading to the syscall, allow overriding that for
  2009. * debugging reasons using --kernel-syscall-graph.
  2010. */
  2011. sys_exit->attr.exclude_callchain_kernel = 1;
  2012. }
  2013. trace->syscalls.events.sys_enter = sys_enter;
  2014. trace->syscalls.events.sys_exit = sys_exit;
  2015. ret = 0;
  2016. out:
  2017. return ret;
  2018. out_delete_sys_exit:
  2019. perf_evsel__delete_priv(sys_exit);
  2020. out_delete_sys_enter:
  2021. perf_evsel__delete_priv(sys_enter);
  2022. goto out;
  2023. }
  2024. static int trace__set_ev_qualifier_filter(struct trace *trace)
  2025. {
  2026. int err = -1;
  2027. char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
  2028. trace->ev_qualifier_ids.nr,
  2029. trace->ev_qualifier_ids.entries);
  2030. if (filter == NULL)
  2031. goto out_enomem;
  2032. if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
  2033. err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);
  2034. free(filter);
  2035. out:
  2036. return err;
  2037. out_enomem:
  2038. errno = ENOMEM;
  2039. goto out;
  2040. }
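/*
 * Live tracing: set up the evlist, filters and mmaps, optionally fork the
 * workload, then consume events from the ring buffers until interrupted
 * or the workload finishes.
 */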
  2041. static int trace__run(struct trace *trace, int argc, const char **argv)
  2042. {
  2043. struct perf_evlist *evlist = trace->evlist;
  2044. struct perf_evsel *evsel, *pgfault_maj = NULL, *pgfault_min = NULL;
  2045. int err = -1, i;
  2046. unsigned long before;
  2047. const bool forks = argc > 0;
  2048. bool draining = false;
  2049. trace->live = true;
  2050. if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
  2051. goto out_error_raw_syscalls;
  2052. if (trace->trace_syscalls)
  2053. trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
  2054. if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
  2055. pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
  2056. if (pgfault_maj == NULL)
  2057. goto out_error_mem;
  2058. perf_evlist__add(evlist, pgfault_maj);
  2059. }
  2060. if ((trace->trace_pgfaults & TRACE_PFMIN)) {
  2061. pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
  2062. if (pgfault_min == NULL)
  2063. goto out_error_mem;
  2064. perf_evlist__add(evlist, pgfault_min);
  2065. }
  2066. if (trace->sched &&
  2067. perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
  2068. trace__sched_stat_runtime))
  2069. goto out_error_sched_stat_runtime;
  2070. err = perf_evlist__create_maps(evlist, &trace->opts.target);
  2071. if (err < 0) {
  2072. fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
  2073. goto out_delete_evlist;
  2074. }
  2075. err = trace__symbols_init(trace, evlist);
  2076. if (err < 0) {
  2077. fprintf(trace->output, "Problems initializing symbol libraries!\n");
  2078. goto out_delete_evlist;
  2079. }
  2080. perf_evlist__config(evlist, &trace->opts, NULL);
  2081. if (callchain_param.enabled) {
  2082. bool use_identifier = false;
  2083. if (trace->syscalls.events.sys_exit) {
  2084. perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
  2085. &trace->opts, &callchain_param);
  2086. use_identifier = true;
  2087. }
  2088. if (pgfault_maj) {
  2089. perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
  2090. use_identifier = true;
  2091. }
  2092. if (pgfault_min) {
  2093. perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
  2094. use_identifier = true;
  2095. }
  2096. if (use_identifier) {
  2097. /*
  2098. * Now we have evsels with different sample_ids, use
  2099. * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
  2100. * from a fixed position in each ring buffer record.
  2101. *
  2102. * As of the changeset introducing this comment, this
  2103. * isn't strictly needed, as the fields that can come before
  2104. * PERF_SAMPLE_ID are all used, but we'll probably disable
  2105. * some of those for things like copying the payload of
  2106. * pointer syscall arguments, and for vfs_getname we don't
  2107. * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
  2108. * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
  2109. */
  2110. perf_evlist__set_sample_bit(evlist, IDENTIFIER);
  2111. perf_evlist__reset_sample_bit(evlist, ID);
  2112. }
  2113. }
  2114. signal(SIGCHLD, sig_handler);
  2115. signal(SIGINT, sig_handler);
  2116. if (forks) {
  2117. err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
  2118. argv, false, NULL);
  2119. if (err < 0) {
  2120. fprintf(trace->output, "Couldn't run the workload!\n");
  2121. goto out_delete_evlist;
  2122. }
  2123. }
  2124. err = perf_evlist__open(evlist);
  2125. if (err < 0)
  2126. goto out_error_open;
  2127. err = bpf__apply_obj_config();
  2128. if (err) {
  2129. char errbuf[BUFSIZ];
  2130. bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
  2131. pr_err("ERROR: Apply config to BPF failed: %s\n",
  2132. errbuf);
  2133. goto out_error_open;
  2134. }
  2135. /*
  2136. * Better not use !target__has_task() here because we need to cover the
  2137. * case where no threads were specified in the command line, but a
  2138. * workload was, and in that case we will fill in the thread_map when
  2139. * we fork the workload in perf_evlist__prepare_workload.
  2140. */
  2141. if (trace->filter_pids.nr > 0)
  2142. err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
  2143. else if (thread_map__pid(evlist->threads, 0) == -1)
  2144. err = perf_evlist__set_filter_pid(evlist, getpid());
  2145. if (err < 0)
  2146. goto out_error_mem;
  2147. if (trace->ev_qualifier_ids.nr > 0) {
  2148. err = trace__set_ev_qualifier_filter(trace);
  2149. if (err < 0)
  2150. goto out_errno;
  2151. pr_debug("event qualifier tracepoint filter: %s\n",
  2152. trace->syscalls.events.sys_exit->filter);
  2153. }
  2154. err = perf_evlist__apply_filters(evlist, &evsel);
  2155. if (err < 0)
  2156. goto out_error_apply_filters;
  2157. err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
  2158. if (err < 0)
  2159. goto out_error_mmap;
  2160. if (!target__none(&trace->opts.target))
  2161. perf_evlist__enable(evlist);
  2162. if (forks)
  2163. perf_evlist__start_workload(evlist);
  2164. trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
  2165. evlist->threads->nr > 1 ||
  2166. perf_evlist__first(evlist)->attr.inherit;
  2167. again:
  2168. before = trace->nr_events;
  2169. for (i = 0; i < evlist->nr_mmaps; i++) {
  2170. union perf_event *event;
  2171. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  2172. struct perf_sample sample;
  2173. ++trace->nr_events;
  2174. err = perf_evlist__parse_sample(evlist, event, &sample);
  2175. if (err) {
  2176. fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
  2177. goto next_event;
  2178. }
  2179. trace__handle_event(trace, event, &sample);
  2180. next_event:
  2181. perf_evlist__mmap_consume(evlist, i);
  2182. if (interrupted)
  2183. goto out_disable;
  2184. if (done && !draining) {
  2185. perf_evlist__disable(evlist);
  2186. draining = true;
  2187. }
  2188. }
  2189. }
  2190. if (trace->nr_events == before) {
  2191. int timeout = done ? 100 : -1;
  2192. if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
  2193. if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
  2194. draining = true;
  2195. goto again;
  2196. }
  2197. } else {
  2198. goto again;
  2199. }
  2200. out_disable:
  2201. thread__zput(trace->current);
  2202. perf_evlist__disable(evlist);
  2203. if (!err) {
  2204. if (trace->summary)
  2205. trace__fprintf_thread_summary(trace, trace->output);
  2206. if (trace->show_tool_stats) {
  2207. fprintf(trace->output, "Stats:\n "
  2208. " vfs_getname : %" PRIu64 "\n"
  2209. " proc_getname: %" PRIu64 "\n",
  2210. trace->stats.vfs_getname,
  2211. trace->stats.proc_getname);
  2212. }
  2213. }
  2214. out_delete_evlist:
  2215. perf_evlist__delete(evlist);
  2216. trace->evlist = NULL;
  2217. trace->live = false;
  2218. return err;
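/*
 * Out of line error handling: these labels are reached only via goto from
 * the setup code above, so they are tucked away after the normal return.
 */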
  2219. {
  2220. char errbuf[BUFSIZ];
  2221. out_error_sched_stat_runtime:
  2222. tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
  2223. goto out_error;
  2224. out_error_raw_syscalls:
  2225. tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
  2226. goto out_error;
  2227. out_error_mmap:
  2228. perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
  2229. goto out_error;
  2230. out_error_open:
  2231. perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
  2232. out_error:
  2233. fprintf(trace->output, "%s\n", errbuf);
  2234. goto out_delete_evlist;
  2235. out_error_apply_filters:
  2236. fprintf(trace->output,
  2237. "Failed to set filter \"%s\" on event %s with %d (%s)\n",
  2238. evsel->filter, perf_evsel__name(evsel), errno,
  2239. strerror_r(errno, errbuf, sizeof(errbuf)));
  2240. goto out_delete_evlist;
  2241. }
  2242. out_error_mem:
  2243. fprintf(trace->output, "Not enough memory to run!\n");
  2244. goto out_delete_evlist;
  2245. out_errno:
  2246. fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
  2247. goto out_delete_evlist;
  2248. }
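/*
 * Replay mode: instead of tracing live, process a previously recorded
 * perf.data file, wiring up the same per-event handlers.
 */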
  2249. static int trace__replay(struct trace *trace)
  2250. {
  2251. const struct perf_evsel_str_handler handlers[] = {
  2252. { "probe:vfs_getname", trace__vfs_getname, },
  2253. };
  2254. struct perf_data_file file = {
  2255. .path = input_name,
  2256. .mode = PERF_DATA_MODE_READ,
  2257. .force = trace->force,
  2258. };
  2259. struct perf_session *session;
  2260. struct perf_evsel *evsel;
  2261. int err = -1;
  2262. trace->tool.sample = trace__process_sample;
  2263. trace->tool.mmap = perf_event__process_mmap;
  2264. trace->tool.mmap2 = perf_event__process_mmap2;
  2265. trace->tool.comm = perf_event__process_comm;
  2266. trace->tool.exit = perf_event__process_exit;
  2267. trace->tool.fork = perf_event__process_fork;
  2268. trace->tool.attr = perf_event__process_attr;
  2269. trace->tool.tracing_data = perf_event__process_tracing_data;
  2270. trace->tool.build_id = perf_event__process_build_id;
  2271. trace->tool.ordered_events = true;
  2272. trace->tool.ordering_requires_timestamps = true;
  2273. /* add tid to output */
  2274. trace->multiple_threads = true;
  2275. session = perf_session__new(&file, false, &trace->tool);
  2276. if (session == NULL)
  2277. return -1;
  2278. if (symbol__init(&session->header.env) < 0)
  2279. goto out;
  2280. trace->host = &session->machines.host;
  2281. err = perf_session__set_tracepoints_handlers(session, handlers);
  2282. if (err)
  2283. goto out;
  2284. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2285. "raw_syscalls:sys_enter");
  2286. /* older kernels have syscalls tp versus raw_syscalls */
  2287. if (evsel == NULL)
  2288. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2289. "syscalls:sys_enter");
  2290. if (evsel &&
  2291. (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
  2292. perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
  2293. pr_err("Error during initialize raw_syscalls:sys_enter event\n");
  2294. goto out;
  2295. }
  2296. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2297. "raw_syscalls:sys_exit");
  2298. if (evsel == NULL)
  2299. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2300. "syscalls:sys_exit");
  2301. if (evsel &&
  2302. (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
  2303. perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
  2304. pr_err("Error during initialize raw_syscalls:sys_exit event\n");
  2305. goto out;
  2306. }
  2307. evlist__for_each(session->evlist, evsel) {
  2308. if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
  2309. (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
  2310. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
  2311. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
  2312. evsel->handler = trace__pgfault;
  2313. }
  2314. err = parse_target_str(trace);
  2315. if (err != 0)
  2316. goto out;
  2317. setup_pager();
  2318. err = perf_session__process_events(session);
  2319. if (err)
  2320. pr_err("Failed to process events, error %d", err);
  2321. else if (trace->summary)
  2322. trace__fprintf_thread_summary(trace, trace->output);
  2323. out:
  2324. perf_session__delete(session);
  2325. return err;
  2326. }
  2327. static size_t trace__fprintf_threads_header(FILE *fp)
  2328. {
  2329. size_t printed;
  2330. printed = fprintf(fp, "\n Summary of events:\n\n");
  2331. return printed;
  2332. }
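/* Re-sort the per-thread syscall stats by the total time spent in each syscall. */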
  2333. DEFINE_RESORT_RB(syscall_stats, a->msecs > b->msecs,
  2334. struct stats *stats;
  2335. double msecs;
  2336. int syscall;
  2337. )
  2338. {
  2339. struct int_node *source = rb_entry(nd, struct int_node, rb_node);
  2340. struct stats *stats = source->priv;
  2341. entry->syscall = source->i;
  2342. entry->stats = stats;
  2343. entry->msecs = stats ? (u64)stats->n * (avg_stats(stats) / NSEC_PER_MSEC) : 0;
  2344. }
  2345. static size_t thread__dump_stats(struct thread_trace *ttrace,
  2346. struct trace *trace, FILE *fp)
  2347. {
  2348. size_t printed = 0;
  2349. struct syscall *sc;
  2350. struct rb_node *nd;
  2351. DECLARE_RESORT_RB_INTLIST(syscall_stats, ttrace->syscall_stats);
  2352. if (syscall_stats == NULL)
  2353. return 0;
  2354. printed += fprintf(fp, "\n");
  2355. printed += fprintf(fp, " syscall calls total min avg max stddev\n");
  2356. printed += fprintf(fp, " (msec) (msec) (msec) (msec) (%%)\n");
  2357. printed += fprintf(fp, " --------------- -------- --------- --------- --------- --------- ------\n");
  2358. resort_rb__for_each(nd, syscall_stats) {
  2359. struct stats *stats = syscall_stats_entry->stats;
  2360. if (stats) {
  2361. double min = (double)(stats->min) / NSEC_PER_MSEC;
  2362. double max = (double)(stats->max) / NSEC_PER_MSEC;
  2363. double avg = avg_stats(stats);
  2364. double pct;
  2365. u64 n = (u64) stats->n;
  2366. pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
  2367. avg /= NSEC_PER_MSEC;
  2368. sc = &trace->syscalls.table[syscall_stats_entry->syscall];
  2369. printed += fprintf(fp, " %-15s", sc->name);
  2370. printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
  2371. n, syscall_stats_entry->msecs, min, avg);
  2372. printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
  2373. }
  2374. }
  2375. resort_rb__delete(syscall_stats);
  2376. printed += fprintf(fp, "\n\n");
  2377. return printed;
  2378. }
  2379. static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace)
  2380. {
  2381. size_t printed = 0;
  2382. struct thread_trace *ttrace = thread__priv(thread);
  2383. double ratio;
  2384. if (ttrace == NULL)
  2385. return 0;
  2386. ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
  2387. printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
  2388. printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
  2389. printed += fprintf(fp, "%.1f%%", ratio);
  2390. if (ttrace->pfmaj)
  2391. printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
  2392. if (ttrace->pfmin)
  2393. printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
  2394. if (trace->sched)
  2395. printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
  2396. else if (fputc('\n', fp) != EOF)
  2397. ++printed;
  2398. printed += thread__dump_stats(ttrace, trace, fp);
  2399. return printed;
  2400. }
  2401. static unsigned long thread__nr_events(struct thread_trace *ttrace)
  2402. {
  2403. return ttrace ? ttrace->nr_events : 0;
  2404. }
  2405. DEFINE_RESORT_RB(threads, (thread__nr_events(a->thread->priv) < thread__nr_events(b->thread->priv)),
  2406. struct thread *thread;
  2407. )
  2408. {
  2409. entry->thread = rb_entry(nd, struct thread, rb_node);
  2410. }
  2411. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
  2412. {
  2413. DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host);
  2414. size_t printed = trace__fprintf_threads_header(fp);
  2415. struct rb_node *nd;
  2416. if (threads == NULL) {
  2417. fprintf(fp, "%s", "Error sorting output by nr_events!\n");
  2418. return 0;
  2419. }
  2420. resort_rb__for_each(nd, threads)
  2421. printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
  2422. resort_rb__delete(threads);
  2423. return printed;
  2424. }
  2425. static int trace__set_duration(const struct option *opt, const char *str,
  2426. int unset __maybe_unused)
  2427. {
  2428. struct trace *trace = opt->value;
  2429. trace->duration_filter = atof(str);
  2430. return 0;
  2431. }
  2432. static int trace__set_filter_pids(const struct option *opt, const char *str,
  2433. int unset __maybe_unused)
  2434. {
  2435. int ret = -1;
  2436. size_t i;
  2437. struct trace *trace = opt->value;
  2438. /*
  2439. * FIXME: introduce a intarray class, plain parse csv and create a
  2440. * { int nr, int entries[] } struct...
  2441. */
  2442. struct intlist *list = intlist__new(str);
  2443. if (list == NULL)
  2444. return -1;
  2445. i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
  2446. trace->filter_pids.entries = calloc(i, sizeof(pid_t));
  2447. if (trace->filter_pids.entries == NULL)
  2448. goto out;
  2449. trace->filter_pids.entries[0] = getpid();
  2450. for (i = 1; i < trace->filter_pids.nr; ++i)
  2451. trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
  2452. intlist__delete(list);
  2453. ret = 0;
  2454. out:
  2455. return ret;
  2456. }
  2457. static int trace__open_output(struct trace *trace, const char *filename)
  2458. {
  2459. struct stat st;
  2460. if (!stat(filename, &st) && st.st_size) {
  2461. char oldname[PATH_MAX];
  2462. scnprintf(oldname, sizeof(oldname), "%s.old", filename);
  2463. unlink(oldname);
  2464. rename(filename, oldname);
  2465. }
  2466. trace->output = fopen(filename, "w");
  2467. return trace->output == NULL ? -errno : 0;
  2468. }
  2469. static int parse_pagefaults(const struct option *opt, const char *str,
  2470. int unset __maybe_unused)
  2471. {
  2472. int *trace_pgfaults = opt->value;
  2473. if (strcmp(str, "all") == 0)
  2474. *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
  2475. else if (strcmp(str, "maj") == 0)
  2476. *trace_pgfaults |= TRACE_PFMAJ;
  2477. else if (strcmp(str, "min") == 0)
  2478. *trace_pgfaults |= TRACE_PFMIN;
  2479. else
  2480. return -1;
  2481. return 0;
  2482. }
  2483. static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
  2484. {
  2485. struct perf_evsel *evsel;
  2486. evlist__for_each(evlist, evsel)
  2487. evsel->handler = handler;
  2488. }
  2489. int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
  2490. {
  2491. const char *trace_usage[] = {
  2492. "perf trace [<options>] [<command>]",
  2493. "perf trace [<options>] -- <command> [<options>]",
  2494. "perf trace record [<options>] [<command>]",
  2495. "perf trace record [<options>] -- <command> [<options>]",
  2496. NULL
  2497. };
  2498. struct trace trace = {
  2499. .syscalls = {
  2500. .max = -1,
  2501. },
  2502. .opts = {
  2503. .target = {
  2504. .uid = UINT_MAX,
  2505. .uses_mmap = true,
  2506. },
  2507. .user_freq = UINT_MAX,
  2508. .user_interval = ULLONG_MAX,
  2509. .no_buffering = true,
  2510. .mmap_pages = UINT_MAX,
  2511. .proc_map_timeout = 500,
  2512. },
  2513. .output = stderr,
  2514. .show_comm = true,
  2515. .trace_syscalls = true,
  2516. .kernel_syscallchains = false,
  2517. .max_stack = UINT_MAX,
  2518. };
  2519. const char *output_name = NULL;
  2520. const char *ev_qualifier_str = NULL;
        const struct option trace_options[] = {
        OPT_CALLBACK(0, "event", &trace.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_BOOLEAN(0, "comm", &trace.show_comm,
                    "show the thread COMM next to its id"),
        OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
        OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
        OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
                    "trace events on existing process id"),
        OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
                    "trace events on existing thread id"),
        OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
                     "pids to filter (by the kernel)", trace__set_filter_pids),
        OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
                    "list of cpus to monitor"),
        OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
                    "child tasks do not inherit counters"),
        OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
                     "number of mmap data pages",
                     perf_evlist__parse_mmap_pages),
        OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
                   "user to profile"),
        OPT_CALLBACK(0, "duration", &trace, "float",
                     "show only events with duration > N.M ms",
                     trace__set_duration),
        OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose"),
        OPT_BOOLEAN('T', "time", &trace.full_time,
                    "Show full timestamp, not time relative to first start"),
        OPT_BOOLEAN('s', "summary", &trace.summary_only,
                    "Show only syscall summary with statistics"),
        OPT_BOOLEAN('S', "with-summary", &trace.summary,
                    "Show all syscalls and summary with statistics"),
        OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
                     "Trace pagefaults", parse_pagefaults, "maj"),
        OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
        OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
        OPT_CALLBACK(0, "call-graph", &trace.opts,
                     "record_mode[,record_size]", record_callchain_help,
                     &record_parse_callchain_opt),
        OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
                    "Show the kernel callchains on the syscall exit path"),
        OPT_UINTEGER(0, "min-stack", &trace.min_stack,
                     "Set the minimum stack depth when parsing the callchain, "
                     "anything below the specified depth will be ignored."),
        OPT_UINTEGER(0, "max-stack", &trace.max_stack,
                     "Set the maximum stack depth when parsing the callchain, "
                     "anything beyond the specified depth will be ignored. "
                     "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
        OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
                     "per thread proc mmap processing timeout in ms"),
        OPT_END()
        };
        bool __maybe_unused max_stack_user_set = true;
        bool mmap_pages_user_set = true;
        const char * const trace_subcommands[] = { "record", NULL };
        int err;
        char bf[BUFSIZ];
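
        /* Dump a stack trace if the tool itself hits SIGSEGV or SIGFPE. */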
        signal(SIGSEGV, sighandler_dump_stack);
        signal(SIGFPE, sighandler_dump_stack);

        trace.evlist = perf_evlist__new();
        trace.sctbl = syscalltbl__new();

        if (trace.evlist == NULL || trace.sctbl == NULL) {
                pr_err("Not enough memory to run!\n");
                err = -ENOMEM;
                goto out;
        }

        argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
                                        trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
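
        /* Set up collection of output emitted by attached BPF programs, if any. */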
        err = bpf__setup_stdout(trace.evlist);
        if (err) {
                bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
                pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
                goto out;
        }

        err = -1;

        if (trace.trace_pgfaults) {
                trace.opts.sample_address = true;
                trace.opts.sample_time = true;
        }

        if (trace.opts.mmap_pages == UINT_MAX)
                mmap_pages_user_set = false;

        if (trace.max_stack == UINT_MAX) {
                trace.max_stack = sysctl_perf_event_max_stack;
                max_stack_user_set = false;
        }
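
        /*
         * --min-stack/--max-stack imply callchains: default to DWARF
         * unwinding when it is available and no call-graph mode was chosen.
         */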
#ifdef HAVE_DWARF_UNWIND_SUPPORT
        if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
                record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
#endif

        if (callchain_param.enabled) {
                if (!mmap_pages_user_set && geteuid() == 0)
                        trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;

                symbol_conf.use_callchain = true;
        }
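
        /* Extra events requested with --event get the generic trace__event_handler. */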
        if (trace.evlist->nr_entries > 0)
                evlist__set_evsel_handler(trace.evlist, trace__event_handler);

        if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
                return trace__record(&trace, argc-1, &argv[1]);

        /* summary_only implies summary option, but don't overwrite summary if set */
        if (trace.summary_only)
                trace.summary = trace.summary_only;

        if (!trace.trace_syscalls && !trace.trace_pgfaults &&
            trace.evlist->nr_entries == 0 /* Was --events used? */) {
                pr_err("Please specify something to trace.\n");
                return -1;
        }

        if (!trace.trace_syscalls && ev_qualifier_str) {
                pr_err("The -e option can't be used with --no-syscalls.\n");
                goto out;
        }

        if (output_name != NULL) {
                err = trace__open_output(&trace, output_name);
                if (err < 0) {
                        perror("failed to create output file");
                        goto out;
                }
        }
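
        /* Look up the syscall table id of "open"; used later when handling syscall events. */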
        trace.open_id = syscalltbl__id(trace.sctbl, "open");

        if (ev_qualifier_str != NULL) {
                const char *s = ev_qualifier_str;
                struct strlist_config slist_config = {
                        .dirname = system_path(STRACE_GROUPS_DIR),
                };
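
                /* A leading '!' negates the qualifier: trace everything except the listed syscalls. */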
                trace.not_ev_qualifier = *s == '!';
                if (trace.not_ev_qualifier)
                        ++s;
                trace.ev_qualifier = strlist__new(s, &slist_config);
                if (trace.ev_qualifier == NULL) {
                        fputs("Not enough memory to parse event qualifier",
                              trace.output);
                        err = -ENOMEM;
                        goto out_close;
                }

                err = trace__validate_ev_qualifier(&trace);
                if (err)
                        goto out_close;
        }
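
        /* Validate the --pid/--tid/--cpu/--uid target combination and resolve the uid. */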
        err = target__validate(&trace.opts.target);
        if (err) {
                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }

        err = target__parse_uid(&trace.opts.target);
        if (err) {
                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                fprintf(trace.output, "%s", bf);
                goto out_close;
        }
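
        /* No workload and no explicit target: default to system-wide tracing. */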
        if (!argc && target__none(&trace.opts.target))
                trace.opts.target.system_wide = true;

        if (input_name)
                err = trace__replay(&trace);
        else
                err = trace__run(&trace, argc, argv);

out_close:
        if (output_name != NULL)
                fclose(trace.output);
out:
        return err;
}