builtin-trace.c

  1. #include <traceevent/event-parse.h>
  2. #include "builtin.h"
  3. #include "util/color.h"
  4. #include "util/debug.h"
  5. #include "util/evlist.h"
  6. #include "util/machine.h"
  7. #include "util/session.h"
  8. #include "util/thread.h"
  9. #include "util/parse-options.h"
  10. #include "util/strlist.h"
  11. #include "util/intlist.h"
  12. #include "util/thread_map.h"
  13. #include "util/stat.h"
  14. #include "trace-event.h"
  15. #include "util/parse-events.h"
  16. #include <libaudit.h>
  17. #include <stdlib.h>
  18. #include <sys/mman.h>
  19. #include <linux/futex.h>
  20. /* For older distros: */
  21. #ifndef MAP_STACK
  22. # define MAP_STACK 0x20000
  23. #endif
  24. #ifndef MADV_HWPOISON
  25. # define MADV_HWPOISON 100
  26. #endif
  27. #ifndef MADV_MERGEABLE
  28. # define MADV_MERGEABLE 12
  29. #endif
  30. #ifndef MADV_UNMERGEABLE
  31. # define MADV_UNMERGEABLE 13
  32. #endif
  33. #ifndef EFD_SEMAPHORE
  34. # define EFD_SEMAPHORE 1
  35. #endif
  36. #ifndef EFD_NONBLOCK
  37. # define EFD_NONBLOCK 00004000
  38. #endif
  39. #ifndef EFD_CLOEXEC
  40. # define EFD_CLOEXEC 02000000
  41. #endif
  42. #ifndef O_CLOEXEC
  43. # define O_CLOEXEC 02000000
  44. #endif
  45. #ifndef SOCK_DCCP
  46. # define SOCK_DCCP 6
  47. #endif
  48. #ifndef SOCK_CLOEXEC
  49. # define SOCK_CLOEXEC 02000000
  50. #endif
  51. #ifndef SOCK_NONBLOCK
  52. # define SOCK_NONBLOCK 00004000
  53. #endif
  54. #ifndef MSG_CMSG_CLOEXEC
  55. # define MSG_CMSG_CLOEXEC 0x40000000
  56. #endif
  57. #ifndef PERF_FLAG_FD_NO_GROUP
  58. # define PERF_FLAG_FD_NO_GROUP (1UL << 0)
  59. #endif
  60. #ifndef PERF_FLAG_FD_OUTPUT
  61. # define PERF_FLAG_FD_OUTPUT (1UL << 1)
  62. #endif
  63. #ifndef PERF_FLAG_PID_CGROUP
  64. # define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
  65. #endif
  66. #ifndef PERF_FLAG_FD_CLOEXEC
  67. # define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
  68. #endif
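/*
 * Accessor for one tracepoint payload field: 'offset' locates it inside
 * sample->raw_data, and the union holds the reader used to fetch it, either
 * as an integer or as a pointer into the raw payload.
 */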
  69. struct tp_field {
  70. int offset;
  71. union {
  72. u64 (*integer)(struct tp_field *field, struct perf_sample *sample);
  73. void *(*pointer)(struct tp_field *field, struct perf_sample *sample);
  74. };
  75. };
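/*
 * Generate tp_field__u{8,16,32,64}() readers that memcpy() the value out of
 * sample->raw_data at field->offset. The __SWAPPED variants additionally
 * byte-swap, for perf.data files recorded on a machine of the opposite
 * endianness (see evsel->needs_swap below).
 */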
  76. #define TP_UINT_FIELD(bits) \
  77. static u64 tp_field__u##bits(struct tp_field *field, struct perf_sample *sample) \
  78. { \
  79. u##bits value; \
  80. memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
  81. return value; \
  82. }
  83. TP_UINT_FIELD(8);
  84. TP_UINT_FIELD(16);
  85. TP_UINT_FIELD(32);
  86. TP_UINT_FIELD(64);
  87. #define TP_UINT_FIELD__SWAPPED(bits) \
  88. static u64 tp_field__swapped_u##bits(struct tp_field *field, struct perf_sample *sample) \
  89. { \
  90. u##bits value; \
  91. memcpy(&value, sample->raw_data + field->offset, sizeof(value)); \
  92. return bswap_##bits(value);\
  93. }
  94. TP_UINT_FIELD__SWAPPED(16);
  95. TP_UINT_FIELD__SWAPPED(32);
  96. TP_UINT_FIELD__SWAPPED(64);
  97. static int tp_field__init_uint(struct tp_field *field,
  98. struct format_field *format_field,
  99. bool needs_swap)
  100. {
  101. field->offset = format_field->offset;
  102. switch (format_field->size) {
  103. case 1:
  104. field->integer = tp_field__u8;
  105. break;
  106. case 2:
  107. field->integer = needs_swap ? tp_field__swapped_u16 : tp_field__u16;
  108. break;
  109. case 4:
  110. field->integer = needs_swap ? tp_field__swapped_u32 : tp_field__u32;
  111. break;
  112. case 8:
  113. field->integer = needs_swap ? tp_field__swapped_u64 : tp_field__u64;
  114. break;
  115. default:
  116. return -1;
  117. }
  118. return 0;
  119. }
  120. static void *tp_field__ptr(struct tp_field *field, struct perf_sample *sample)
  121. {
  122. return sample->raw_data + field->offset;
  123. }
  124. static int tp_field__init_ptr(struct tp_field *field, struct format_field *format_field)
  125. {
  126. field->offset = format_field->offset;
  127. field->pointer = tp_field__ptr;
  128. return 0;
  129. }
  130. struct syscall_tp {
  131. struct tp_field id;
  132. union {
  133. struct tp_field args, ret;
  134. };
  135. };
  136. static int perf_evsel__init_tp_uint_field(struct perf_evsel *evsel,
  137. struct tp_field *field,
  138. const char *name)
  139. {
  140. struct format_field *format_field = perf_evsel__field(evsel, name);
  141. if (format_field == NULL)
  142. return -1;
  143. return tp_field__init_uint(field, format_field, evsel->needs_swap);
  144. }
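/*
 * Convenience wrappers: perf_evsel__init_sc_tp_uint_field(evsel, id) expands
 * to perf_evsel__init_tp_uint_field(evsel, &sc->id, "id"), i.e. it wires the
 * struct syscall_tp member of the same name to the tracepoint format field
 * called "id".
 */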
  145. #define perf_evsel__init_sc_tp_uint_field(evsel, name) \
  146. ({ struct syscall_tp *sc = evsel->priv;\
  147. perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
  148. static int perf_evsel__init_tp_ptr_field(struct perf_evsel *evsel,
  149. struct tp_field *field,
  150. const char *name)
  151. {
  152. struct format_field *format_field = perf_evsel__field(evsel, name);
  153. if (format_field == NULL)
  154. return -1;
  155. return tp_field__init_ptr(field, format_field);
  156. }
  157. #define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
  158. ({ struct syscall_tp *sc = evsel->priv;\
  159. perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
  160. static void perf_evsel__delete_priv(struct perf_evsel *evsel)
  161. {
  162. zfree(&evsel->priv);
  163. perf_evsel__delete(evsel);
  164. }
  165. static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel, void *handler)
  166. {
  167. evsel->priv = malloc(sizeof(struct syscall_tp));
  168. if (evsel->priv != NULL) {
  169. if (perf_evsel__init_sc_tp_uint_field(evsel, id))
  170. goto out_delete;
  171. evsel->handler = handler;
  172. return 0;
  173. }
  174. return -ENOMEM;
  175. out_delete:
  176. zfree(&evsel->priv);
  177. return -ENOENT;
  178. }
  179. static struct perf_evsel *perf_evsel__syscall_newtp(const char *direction, void *handler)
  180. {
  181. struct perf_evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
/* older kernels (e.g., RHEL6) use syscalls:{enter,exit} */
  183. if (evsel == NULL)
  184. evsel = perf_evsel__newtp("syscalls", direction);
  185. if (evsel) {
  186. if (perf_evsel__init_syscall_tp(evsel, handler))
  187. goto out_delete;
  188. }
  189. return evsel;
  190. out_delete:
  191. perf_evsel__delete_priv(evsel);
  192. return NULL;
  193. }
  194. #define perf_evsel__sc_tp_uint(evsel, name, sample) \
  195. ({ struct syscall_tp *fields = evsel->priv; \
  196. fields->name.integer(&fields->name, sample); })
  197. #define perf_evsel__sc_tp_ptr(evsel, name, sample) \
  198. ({ struct syscall_tp *fields = evsel->priv; \
  199. fields->name.pointer(&fields->name, sample); })
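/*
 * Context passed to the argument beautifiers below: 'val' is the raw syscall
 * argument, 'idx' its position, 'mask' lets a formatter suppress later
 * arguments (one bit per argument index), and 'parm' carries formatter
 * private data such as a struct strarray.
 */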
  200. struct syscall_arg {
  201. unsigned long val;
  202. struct thread *thread;
  203. struct trace *trace;
  204. void *parm;
  205. u8 idx;
  206. u8 mask;
  207. };
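/*
 * Maps small integer values to symbolic names. 'offset' is subtracted from
 * the value first, for tables whose first entry does not correspond to 0
 * (e.g. DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1), since EPOLL_CTL_ADD == 1).
 */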
  208. struct strarray {
  209. int offset;
  210. int nr_entries;
  211. const char **entries;
  212. };
  213. #define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
  214. .nr_entries = ARRAY_SIZE(array), \
  215. .entries = array, \
  216. }
  217. #define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
  218. .offset = off, \
  219. .nr_entries = ARRAY_SIZE(array), \
  220. .entries = array, \
  221. }
  222. static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
  223. const char *intfmt,
  224. struct syscall_arg *arg)
  225. {
  226. struct strarray *sa = arg->parm;
  227. int idx = arg->val - sa->offset;
  228. if (idx < 0 || idx >= sa->nr_entries)
  229. return scnprintf(bf, size, intfmt, arg->val);
  230. return scnprintf(bf, size, "%s", sa->entries[idx]);
  231. }
  232. static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
  233. struct syscall_arg *arg)
  234. {
  235. return __syscall_arg__scnprintf_strarray(bf, size, "%d", arg);
  236. }
  237. #define SCA_STRARRAY syscall_arg__scnprintf_strarray
  238. #if defined(__i386__) || defined(__x86_64__)
  239. /*
  240. * FIXME: Make this available to all arches as soon as the ioctl beautifier
  241. * gets rewritten to support all arches.
  242. */
  243. static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
  244. struct syscall_arg *arg)
  245. {
  246. return __syscall_arg__scnprintf_strarray(bf, size, "%#x", arg);
  247. }
  248. #define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
  249. #endif /* defined(__i386__) || defined(__x86_64__) */
  250. static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
  251. struct syscall_arg *arg);
  252. #define SCA_FD syscall_arg__scnprintf_fd
  253. static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
  254. struct syscall_arg *arg)
  255. {
  256. int fd = arg->val;
  257. if (fd == AT_FDCWD)
  258. return scnprintf(bf, size, "CWD");
  259. return syscall_arg__scnprintf_fd(bf, size, arg);
  260. }
  261. #define SCA_FDAT syscall_arg__scnprintf_fd_at
  262. static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
  263. struct syscall_arg *arg);
  264. #define SCA_CLOSE_FD syscall_arg__scnprintf_close_fd
  265. static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
  266. struct syscall_arg *arg)
  267. {
  268. return scnprintf(bf, size, "%#lx", arg->val);
  269. }
  270. #define SCA_HEX syscall_arg__scnprintf_hex
  271. static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
  272. struct syscall_arg *arg)
  273. {
  274. return scnprintf(bf, size, "%d", arg->val);
  275. }
  276. #define SCA_INT syscall_arg__scnprintf_int
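/*
 * The flag beautifiers below all follow the same pattern: print the name of
 * each known bit that is set, clear it from the local copy, and finally dump
 * whatever unknown bits remain in hex.
 */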
  277. static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
  278. struct syscall_arg *arg)
  279. {
  280. int printed = 0, prot = arg->val;
  281. if (prot == PROT_NONE)
  282. return scnprintf(bf, size, "NONE");
  283. #define P_MMAP_PROT(n) \
  284. if (prot & PROT_##n) { \
  285. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  286. prot &= ~PROT_##n; \
  287. }
  288. P_MMAP_PROT(EXEC);
  289. P_MMAP_PROT(READ);
  290. P_MMAP_PROT(WRITE);
  291. #ifdef PROT_SEM
  292. P_MMAP_PROT(SEM);
  293. #endif
  294. P_MMAP_PROT(GROWSDOWN);
  295. P_MMAP_PROT(GROWSUP);
  296. #undef P_MMAP_PROT
  297. if (prot)
  298. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
  299. return printed;
  300. }
  301. #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
  302. static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
  303. struct syscall_arg *arg)
  304. {
  305. int printed = 0, flags = arg->val;
  306. #define P_MMAP_FLAG(n) \
  307. if (flags & MAP_##n) { \
  308. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  309. flags &= ~MAP_##n; \
  310. }
  311. P_MMAP_FLAG(SHARED);
  312. P_MMAP_FLAG(PRIVATE);
  313. #ifdef MAP_32BIT
  314. P_MMAP_FLAG(32BIT);
  315. #endif
  316. P_MMAP_FLAG(ANONYMOUS);
  317. P_MMAP_FLAG(DENYWRITE);
  318. P_MMAP_FLAG(EXECUTABLE);
  319. P_MMAP_FLAG(FILE);
  320. P_MMAP_FLAG(FIXED);
  321. P_MMAP_FLAG(GROWSDOWN);
  322. #ifdef MAP_HUGETLB
  323. P_MMAP_FLAG(HUGETLB);
  324. #endif
  325. P_MMAP_FLAG(LOCKED);
  326. P_MMAP_FLAG(NONBLOCK);
  327. P_MMAP_FLAG(NORESERVE);
  328. P_MMAP_FLAG(POPULATE);
  329. P_MMAP_FLAG(STACK);
  330. #ifdef MAP_UNINITIALIZED
  331. P_MMAP_FLAG(UNINITIALIZED);
  332. #endif
  333. #undef P_MMAP_FLAG
  334. if (flags)
  335. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  336. return printed;
  337. }
  338. #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
  339. static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
  340. struct syscall_arg *arg)
  341. {
  342. int printed = 0, flags = arg->val;
  343. #define P_MREMAP_FLAG(n) \
  344. if (flags & MREMAP_##n) { \
  345. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  346. flags &= ~MREMAP_##n; \
  347. }
  348. P_MREMAP_FLAG(MAYMOVE);
  349. #ifdef MREMAP_FIXED
  350. P_MREMAP_FLAG(FIXED);
  351. #endif
  352. #undef P_MREMAP_FLAG
  353. if (flags)
  354. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  355. return printed;
  356. }
  357. #define SCA_MREMAP_FLAGS syscall_arg__scnprintf_mremap_flags
  358. static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
  359. struct syscall_arg *arg)
  360. {
  361. int behavior = arg->val;
  362. switch (behavior) {
  363. #define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
  364. P_MADV_BHV(NORMAL);
  365. P_MADV_BHV(RANDOM);
  366. P_MADV_BHV(SEQUENTIAL);
  367. P_MADV_BHV(WILLNEED);
  368. P_MADV_BHV(DONTNEED);
  369. P_MADV_BHV(REMOVE);
  370. P_MADV_BHV(DONTFORK);
  371. P_MADV_BHV(DOFORK);
  372. P_MADV_BHV(HWPOISON);
  373. #ifdef MADV_SOFT_OFFLINE
  374. P_MADV_BHV(SOFT_OFFLINE);
  375. #endif
  376. P_MADV_BHV(MERGEABLE);
  377. P_MADV_BHV(UNMERGEABLE);
  378. #ifdef MADV_HUGEPAGE
  379. P_MADV_BHV(HUGEPAGE);
  380. #endif
  381. #ifdef MADV_NOHUGEPAGE
  382. P_MADV_BHV(NOHUGEPAGE);
  383. #endif
  384. #ifdef MADV_DONTDUMP
  385. P_MADV_BHV(DONTDUMP);
  386. #endif
  387. #ifdef MADV_DODUMP
  388. P_MADV_BHV(DODUMP);
  389. #endif
#undef P_MADV_BHV
  391. default: break;
  392. }
  393. return scnprintf(bf, size, "%#x", behavior);
  394. }
  395. #define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
  396. static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
  397. struct syscall_arg *arg)
  398. {
  399. int printed = 0, op = arg->val;
  400. if (op == 0)
  401. return scnprintf(bf, size, "NONE");
  402. #define P_CMD(cmd) \
  403. if ((op & LOCK_##cmd) == LOCK_##cmd) { \
  404. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
  405. op &= ~LOCK_##cmd; \
  406. }
  407. P_CMD(SH);
  408. P_CMD(EX);
  409. P_CMD(NB);
  410. P_CMD(UN);
  411. P_CMD(MAND);
  412. P_CMD(RW);
  413. P_CMD(READ);
  414. P_CMD(WRITE);
#undef P_CMD
  416. if (op)
  417. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", op);
  418. return printed;
  419. }
  420. #define SCA_FLOCK syscall_arg__scnprintf_flock
  421. static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
  422. {
  423. enum syscall_futex_args {
  424. SCF_UADDR = (1 << 0),
  425. SCF_OP = (1 << 1),
  426. SCF_VAL = (1 << 2),
  427. SCF_TIMEOUT = (1 << 3),
  428. SCF_UADDR2 = (1 << 4),
  429. SCF_VAL3 = (1 << 5),
  430. };
  431. int op = arg->val;
  432. int cmd = op & FUTEX_CMD_MASK;
  433. size_t printed = 0;
  434. switch (cmd) {
  435. #define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
  436. P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
  437. P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  438. P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  439. P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break;
  440. P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break;
  441. P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break;
  442. P_FUTEX_OP(WAKE_OP); break;
  443. P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  444. P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
  445. P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
  446. P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break;
  447. P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break;
  448. P_FUTEX_OP(WAIT_REQUEUE_PI); break;
  449. default: printed = scnprintf(bf, size, "%#x", cmd); break;
  450. }
  451. if (op & FUTEX_PRIVATE_FLAG)
  452. printed += scnprintf(bf + printed, size - printed, "|PRIV");
  453. if (op & FUTEX_CLOCK_REALTIME)
  454. printed += scnprintf(bf + printed, size - printed, "|CLKRT");
  455. return printed;
  456. }
  457. #define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
  458. static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
  459. static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
  460. static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
  461. static DEFINE_STRARRAY(itimers);
  462. static const char *whences[] = { "SET", "CUR", "END",
  463. #ifdef SEEK_DATA
  464. "DATA",
  465. #endif
  466. #ifdef SEEK_HOLE
  467. "HOLE",
  468. #endif
  469. };
  470. static DEFINE_STRARRAY(whences);
  471. static const char *fcntl_cmds[] = {
  472. "DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
  473. "SETLKW", "SETOWN", "GETOWN", "SETSIG", "GETSIG", "F_GETLK64",
  474. "F_SETLK64", "F_SETLKW64", "F_SETOWN_EX", "F_GETOWN_EX",
  475. "F_GETOWNER_UIDS",
  476. };
  477. static DEFINE_STRARRAY(fcntl_cmds);
  478. static const char *rlimit_resources[] = {
  479. "CPU", "FSIZE", "DATA", "STACK", "CORE", "RSS", "NPROC", "NOFILE",
  480. "MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
  481. "RTTIME",
  482. };
  483. static DEFINE_STRARRAY(rlimit_resources);
  484. static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
  485. static DEFINE_STRARRAY(sighow);
  486. static const char *clockid[] = {
  487. "REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
  488. "MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE",
  489. };
  490. static DEFINE_STRARRAY(clockid);
  491. static const char *socket_families[] = {
  492. "UNSPEC", "LOCAL", "INET", "AX25", "IPX", "APPLETALK", "NETROM",
  493. "BRIDGE", "ATMPVC", "X25", "INET6", "ROSE", "DECnet", "NETBEUI",
  494. "SECURITY", "KEY", "NETLINK", "PACKET", "ASH", "ECONET", "ATMSVC",
  495. "RDS", "SNA", "IRDA", "PPPOX", "WANPIPE", "LLC", "IB", "CAN", "TIPC",
  496. "BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
  497. "ALG", "NFC", "VSOCK",
  498. };
  499. static DEFINE_STRARRAY(socket_families);
  500. #ifndef SOCK_TYPE_MASK
  501. #define SOCK_TYPE_MASK 0xf
  502. #endif
  503. static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size,
  504. struct syscall_arg *arg)
  505. {
  506. size_t printed;
  507. int type = arg->val,
  508. flags = type & ~SOCK_TYPE_MASK;
  509. type &= SOCK_TYPE_MASK;
  510. /*
  511. * Can't use a strarray, MIPS may override for ABI reasons.
  512. */
  513. switch (type) {
  514. #define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
  515. P_SK_TYPE(STREAM);
  516. P_SK_TYPE(DGRAM);
  517. P_SK_TYPE(RAW);
  518. P_SK_TYPE(RDM);
  519. P_SK_TYPE(SEQPACKET);
  520. P_SK_TYPE(DCCP);
  521. P_SK_TYPE(PACKET);
  522. #undef P_SK_TYPE
  523. default:
  524. printed = scnprintf(bf, size, "%#x", type);
  525. }
  526. #define P_SK_FLAG(n) \
  527. if (flags & SOCK_##n) { \
  528. printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
  529. flags &= ~SOCK_##n; \
  530. }
  531. P_SK_FLAG(CLOEXEC);
  532. P_SK_FLAG(NONBLOCK);
  533. #undef P_SK_FLAG
  534. if (flags)
  535. printed += scnprintf(bf + printed, size - printed, "|%#x", flags);
  536. return printed;
  537. }
  538. #define SCA_SK_TYPE syscall_arg__scnprintf_socket_type
  539. #ifndef MSG_PROBE
  540. #define MSG_PROBE 0x10
  541. #endif
  542. #ifndef MSG_WAITFORONE
  543. #define MSG_WAITFORONE 0x10000
  544. #endif
  545. #ifndef MSG_SENDPAGE_NOTLAST
  546. #define MSG_SENDPAGE_NOTLAST 0x20000
  547. #endif
  548. #ifndef MSG_FASTOPEN
  549. #define MSG_FASTOPEN 0x20000000
  550. #endif
  551. static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
  552. struct syscall_arg *arg)
  553. {
  554. int printed = 0, flags = arg->val;
  555. if (flags == 0)
  556. return scnprintf(bf, size, "NONE");
  557. #define P_MSG_FLAG(n) \
  558. if (flags & MSG_##n) { \
  559. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  560. flags &= ~MSG_##n; \
  561. }
  562. P_MSG_FLAG(OOB);
  563. P_MSG_FLAG(PEEK);
  564. P_MSG_FLAG(DONTROUTE);
  565. P_MSG_FLAG(TRYHARD);
  566. P_MSG_FLAG(CTRUNC);
  567. P_MSG_FLAG(PROBE);
  568. P_MSG_FLAG(TRUNC);
  569. P_MSG_FLAG(DONTWAIT);
  570. P_MSG_FLAG(EOR);
  571. P_MSG_FLAG(WAITALL);
  572. P_MSG_FLAG(FIN);
  573. P_MSG_FLAG(SYN);
  574. P_MSG_FLAG(CONFIRM);
  575. P_MSG_FLAG(RST);
  576. P_MSG_FLAG(ERRQUEUE);
  577. P_MSG_FLAG(NOSIGNAL);
  578. P_MSG_FLAG(MORE);
  579. P_MSG_FLAG(WAITFORONE);
  580. P_MSG_FLAG(SENDPAGE_NOTLAST);
  581. P_MSG_FLAG(FASTOPEN);
  582. P_MSG_FLAG(CMSG_CLOEXEC);
  583. #undef P_MSG_FLAG
  584. if (flags)
  585. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  586. return printed;
  587. }
  588. #define SCA_MSG_FLAGS syscall_arg__scnprintf_msg_flags
  589. static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
  590. struct syscall_arg *arg)
  591. {
  592. size_t printed = 0;
  593. int mode = arg->val;
  594. if (mode == F_OK) /* 0 */
  595. return scnprintf(bf, size, "F");
  596. #define P_MODE(n) \
  597. if (mode & n##_OK) { \
  598. printed += scnprintf(bf + printed, size - printed, "%s", #n); \
  599. mode &= ~n##_OK; \
  600. }
  601. P_MODE(R);
  602. P_MODE(W);
  603. P_MODE(X);
  604. #undef P_MODE
  605. if (mode)
  606. printed += scnprintf(bf + printed, size - printed, "|%#x", mode);
  607. return printed;
  608. }
  609. #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
  610. static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
  611. struct syscall_arg *arg)
  612. {
  613. int printed = 0, flags = arg->val;
  614. if (!(flags & O_CREAT))
  615. arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
  616. if (flags == 0)
  617. return scnprintf(bf, size, "RDONLY");
  618. #define P_FLAG(n) \
  619. if (flags & O_##n) { \
  620. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  621. flags &= ~O_##n; \
  622. }
  623. P_FLAG(APPEND);
  624. P_FLAG(ASYNC);
  625. P_FLAG(CLOEXEC);
  626. P_FLAG(CREAT);
  627. P_FLAG(DIRECT);
  628. P_FLAG(DIRECTORY);
  629. P_FLAG(EXCL);
  630. P_FLAG(LARGEFILE);
  631. P_FLAG(NOATIME);
  632. P_FLAG(NOCTTY);
  633. #ifdef O_NONBLOCK
  634. P_FLAG(NONBLOCK);
  635. #elif O_NDELAY
  636. P_FLAG(NDELAY);
  637. #endif
  638. #ifdef O_PATH
  639. P_FLAG(PATH);
  640. #endif
  641. P_FLAG(RDWR);
  642. #ifdef O_DSYNC
  643. if ((flags & O_SYNC) == O_SYNC)
  644. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
  645. else {
  646. P_FLAG(DSYNC);
  647. }
  648. #else
  649. P_FLAG(SYNC);
  650. #endif
  651. P_FLAG(TRUNC);
  652. P_FLAG(WRONLY);
  653. #undef P_FLAG
  654. if (flags)
  655. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  656. return printed;
  657. }
  658. #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
  659. static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
  660. struct syscall_arg *arg)
  661. {
  662. int printed = 0, flags = arg->val;
  663. if (flags == 0)
  664. return 0;
  665. #define P_FLAG(n) \
  666. if (flags & PERF_FLAG_##n) { \
  667. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  668. flags &= ~PERF_FLAG_##n; \
  669. }
  670. P_FLAG(FD_NO_GROUP);
  671. P_FLAG(FD_OUTPUT);
  672. P_FLAG(PID_CGROUP);
  673. P_FLAG(FD_CLOEXEC);
  674. #undef P_FLAG
  675. if (flags)
  676. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  677. return printed;
  678. }
  679. #define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
  680. static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
  681. struct syscall_arg *arg)
  682. {
  683. int printed = 0, flags = arg->val;
  684. if (flags == 0)
  685. return scnprintf(bf, size, "NONE");
  686. #define P_FLAG(n) \
  687. if (flags & EFD_##n) { \
  688. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  689. flags &= ~EFD_##n; \
  690. }
  691. P_FLAG(SEMAPHORE);
  692. P_FLAG(CLOEXEC);
  693. P_FLAG(NONBLOCK);
  694. #undef P_FLAG
  695. if (flags)
  696. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  697. return printed;
  698. }
  699. #define SCA_EFD_FLAGS syscall_arg__scnprintf_eventfd_flags
  700. static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
  701. struct syscall_arg *arg)
  702. {
  703. int printed = 0, flags = arg->val;
  704. #define P_FLAG(n) \
  705. if (flags & O_##n) { \
  706. printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
  707. flags &= ~O_##n; \
  708. }
  709. P_FLAG(CLOEXEC);
  710. P_FLAG(NONBLOCK);
  711. #undef P_FLAG
  712. if (flags)
  713. printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
  714. return printed;
  715. }
  716. #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags
  717. static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
  718. {
  719. int sig = arg->val;
  720. switch (sig) {
  721. #define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
  722. P_SIGNUM(HUP);
  723. P_SIGNUM(INT);
  724. P_SIGNUM(QUIT);
  725. P_SIGNUM(ILL);
  726. P_SIGNUM(TRAP);
  727. P_SIGNUM(ABRT);
  728. P_SIGNUM(BUS);
  729. P_SIGNUM(FPE);
  730. P_SIGNUM(KILL);
  731. P_SIGNUM(USR1);
  732. P_SIGNUM(SEGV);
  733. P_SIGNUM(USR2);
  734. P_SIGNUM(PIPE);
  735. P_SIGNUM(ALRM);
  736. P_SIGNUM(TERM);
  737. P_SIGNUM(CHLD);
  738. P_SIGNUM(CONT);
  739. P_SIGNUM(STOP);
  740. P_SIGNUM(TSTP);
  741. P_SIGNUM(TTIN);
  742. P_SIGNUM(TTOU);
  743. P_SIGNUM(URG);
  744. P_SIGNUM(XCPU);
  745. P_SIGNUM(XFSZ);
  746. P_SIGNUM(VTALRM);
  747. P_SIGNUM(PROF);
  748. P_SIGNUM(WINCH);
  749. P_SIGNUM(IO);
  750. P_SIGNUM(PWR);
  751. P_SIGNUM(SYS);
  752. #ifdef SIGEMT
  753. P_SIGNUM(EMT);
  754. #endif
  755. #ifdef SIGSTKFLT
  756. P_SIGNUM(STKFLT);
  757. #endif
  758. #ifdef SIGSWI
  759. P_SIGNUM(SWI);
  760. #endif
  761. default: break;
  762. }
  763. return scnprintf(bf, size, "%#x", sig);
  764. }
  765. #define SCA_SIGNUM syscall_arg__scnprintf_signum
  766. #if defined(__i386__) || defined(__x86_64__)
  767. /*
  768. * FIXME: Make this available to all arches.
  769. */
  770. #define TCGETS 0x5401
  771. static const char *tioctls[] = {
  772. "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
  773. "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL",
  774. "TIOCSCTTY", "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI",
  775. "TIOCGWINSZ", "TIOCSWINSZ", "TIOCMGET", "TIOCMBIS", "TIOCMBIC",
  776. "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR", "FIONREAD", "TIOCLINUX",
  777. "TIOCCONS", "TIOCGSERIAL", "TIOCSSERIAL", "TIOCPKT", "FIONBIO",
  778. "TIOCNOTTY", "TIOCSETD", "TIOCGETD", "TCSBRKP", [0x27] = "TIOCSBRK",
  779. "TIOCCBRK", "TIOCGSID", "TCGETS2", "TCSETS2", "TCSETSW2", "TCSETSF2",
  780. "TIOCGRS485", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
  781. "TIOCGDEV||TCGETX", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG",
  782. "TIOCVHANGUP", "TIOCGPKT", "TIOCGPTLCK", "TIOCGEXCL",
  783. [0x50] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
  784. "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
  785. "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
  786. "TIOCMIWAIT", "TIOCGICOUNT", [0x60] = "FIOQSIZE",
  787. };
  788. static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
  789. #endif /* defined(__i386__) || defined(__x86_64__) */
  790. #define STRARRAY(arg, name, array) \
  791. .arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
  792. .arg_parm = { [arg] = &strarray__##array, }
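/*
 * Per-syscall formatting overrides. The table must be kept sorted by ->name,
 * since syscall_fmt__find() looks entries up with bsearch()/strcmp().
 */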
  793. static struct syscall_fmt {
  794. const char *name;
  795. const char *alias;
  796. size_t (*arg_scnprintf[6])(char *bf, size_t size, struct syscall_arg *arg);
  797. void *arg_parm[6];
  798. bool errmsg;
  799. bool timeout;
  800. bool hexret;
  801. } syscall_fmts[] = {
  802. { .name = "access", .errmsg = true,
  803. .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
  804. { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
  805. { .name = "brk", .hexret = true,
  806. .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
  807. { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
  808. { .name = "close", .errmsg = true,
  809. .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
  810. { .name = "connect", .errmsg = true, },
  811. { .name = "dup", .errmsg = true,
  812. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  813. { .name = "dup2", .errmsg = true,
  814. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  815. { .name = "dup3", .errmsg = true,
  816. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  817. { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
  818. { .name = "eventfd2", .errmsg = true,
  819. .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
  820. { .name = "faccessat", .errmsg = true,
  821. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  822. { .name = "fadvise64", .errmsg = true,
  823. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  824. { .name = "fallocate", .errmsg = true,
  825. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  826. { .name = "fchdir", .errmsg = true,
  827. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  828. { .name = "fchmod", .errmsg = true,
  829. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  830. { .name = "fchmodat", .errmsg = true,
  831. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  832. { .name = "fchown", .errmsg = true,
  833. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  834. { .name = "fchownat", .errmsg = true,
  835. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  836. { .name = "fcntl", .errmsg = true,
  837. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  838. [1] = SCA_STRARRAY, /* cmd */ },
  839. .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
  840. { .name = "fdatasync", .errmsg = true,
  841. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  842. { .name = "flock", .errmsg = true,
  843. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  844. [1] = SCA_FLOCK, /* cmd */ }, },
  845. { .name = "fsetxattr", .errmsg = true,
  846. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  847. { .name = "fstat", .errmsg = true, .alias = "newfstat",
  848. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  849. { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
  850. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  851. { .name = "fstatfs", .errmsg = true,
  852. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  853. { .name = "fsync", .errmsg = true,
  854. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  855. { .name = "ftruncate", .errmsg = true,
  856. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  857. { .name = "futex", .errmsg = true,
  858. .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
  859. { .name = "futimesat", .errmsg = true,
  860. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  861. { .name = "getdents", .errmsg = true,
  862. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  863. { .name = "getdents64", .errmsg = true,
  864. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  865. { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
  866. { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
  867. { .name = "ioctl", .errmsg = true,
  868. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  869. #if defined(__i386__) || defined(__x86_64__)
  870. /*
  871. * FIXME: Make this available to all arches.
  872. */
  873. [1] = SCA_STRHEXARRAY, /* cmd */
  874. [2] = SCA_HEX, /* arg */ },
  875. .arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
  876. #else
  877. [2] = SCA_HEX, /* arg */ }, },
  878. #endif
  879. { .name = "kill", .errmsg = true,
  880. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  881. { .name = "linkat", .errmsg = true,
  882. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  883. { .name = "lseek", .errmsg = true,
  884. .arg_scnprintf = { [0] = SCA_FD, /* fd */
  885. [2] = SCA_STRARRAY, /* whence */ },
  886. .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
  887. { .name = "lstat", .errmsg = true, .alias = "newlstat", },
  888. { .name = "madvise", .errmsg = true,
  889. .arg_scnprintf = { [0] = SCA_HEX, /* start */
  890. [2] = SCA_MADV_BHV, /* behavior */ }, },
  891. { .name = "mkdirat", .errmsg = true,
  892. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  893. { .name = "mknodat", .errmsg = true,
  894. .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
  895. { .name = "mlock", .errmsg = true,
  896. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  897. { .name = "mlockall", .errmsg = true,
  898. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  899. { .name = "mmap", .hexret = true,
  900. .arg_scnprintf = { [0] = SCA_HEX, /* addr */
  901. [2] = SCA_MMAP_PROT, /* prot */
  902. [3] = SCA_MMAP_FLAGS, /* flags */
  903. [4] = SCA_FD, /* fd */ }, },
  904. { .name = "mprotect", .errmsg = true,
  905. .arg_scnprintf = { [0] = SCA_HEX, /* start */
  906. [2] = SCA_MMAP_PROT, /* prot */ }, },
  907. { .name = "mremap", .hexret = true,
  908. .arg_scnprintf = { [0] = SCA_HEX, /* addr */
  909. [3] = SCA_MREMAP_FLAGS, /* flags */
  910. [4] = SCA_HEX, /* new_addr */ }, },
  911. { .name = "munlock", .errmsg = true,
  912. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  913. { .name = "munmap", .errmsg = true,
  914. .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
  915. { .name = "name_to_handle_at", .errmsg = true,
  916. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  917. { .name = "newfstatat", .errmsg = true,
  918. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  919. { .name = "open", .errmsg = true,
  920. .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
  921. { .name = "open_by_handle_at", .errmsg = true,
  922. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  923. [2] = SCA_OPEN_FLAGS, /* flags */ }, },
  924. { .name = "openat", .errmsg = true,
  925. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
  926. [2] = SCA_OPEN_FLAGS, /* flags */ }, },
  927. { .name = "perf_event_open", .errmsg = true,
  928. .arg_scnprintf = { [1] = SCA_INT, /* pid */
  929. [2] = SCA_INT, /* cpu */
  930. [3] = SCA_FD, /* group_fd */
  931. [4] = SCA_PERF_FLAGS, /* flags */ }, },
  932. { .name = "pipe2", .errmsg = true,
  933. .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
  934. { .name = "poll", .errmsg = true, .timeout = true, },
  935. { .name = "ppoll", .errmsg = true, .timeout = true, },
  936. { .name = "pread", .errmsg = true, .alias = "pread64",
  937. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  938. { .name = "preadv", .errmsg = true, .alias = "pread",
  939. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  940. { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
  941. { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
  942. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  943. { .name = "pwritev", .errmsg = true,
  944. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  945. { .name = "read", .errmsg = true,
  946. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  947. { .name = "readlinkat", .errmsg = true,
  948. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  949. { .name = "readv", .errmsg = true,
  950. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  951. { .name = "recvfrom", .errmsg = true,
  952. .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
  953. { .name = "recvmmsg", .errmsg = true,
  954. .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
  955. { .name = "recvmsg", .errmsg = true,
  956. .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
  957. { .name = "renameat", .errmsg = true,
  958. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  959. { .name = "rt_sigaction", .errmsg = true,
  960. .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
  961. { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
  962. { .name = "rt_sigqueueinfo", .errmsg = true,
  963. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  964. { .name = "rt_tgsigqueueinfo", .errmsg = true,
  965. .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
  966. { .name = "select", .errmsg = true, .timeout = true, },
  967. { .name = "sendmmsg", .errmsg = true,
  968. .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
  969. { .name = "sendmsg", .errmsg = true,
  970. .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
  971. { .name = "sendto", .errmsg = true,
  972. .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
  973. { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
  974. { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
  975. { .name = "shutdown", .errmsg = true,
  976. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  977. { .name = "socket", .errmsg = true,
  978. .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
  979. [1] = SCA_SK_TYPE, /* type */ },
  980. .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
  981. { .name = "socketpair", .errmsg = true,
  982. .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
  983. [1] = SCA_SK_TYPE, /* type */ },
  984. .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
  985. { .name = "stat", .errmsg = true, .alias = "newstat", },
  986. { .name = "symlinkat", .errmsg = true,
  987. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  988. { .name = "tgkill", .errmsg = true,
  989. .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
  990. { .name = "tkill", .errmsg = true,
  991. .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
  992. { .name = "uname", .errmsg = true, .alias = "newuname", },
  993. { .name = "unlinkat", .errmsg = true,
  994. .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
  995. { .name = "utimensat", .errmsg = true,
  996. .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
  997. { .name = "write", .errmsg = true,
  998. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  999. { .name = "writev", .errmsg = true,
  1000. .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
  1001. };
  1002. static int syscall_fmt__cmp(const void *name, const void *fmtp)
  1003. {
  1004. const struct syscall_fmt *fmt = fmtp;
  1005. return strcmp(name, fmt->name);
  1006. }
  1007. static struct syscall_fmt *syscall_fmt__find(const char *name)
  1008. {
  1009. const int nmemb = ARRAY_SIZE(syscall_fmts);
  1010. return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
  1011. }
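/*
 * Everything we know about one syscall: its tracepoint format, argument
 * descriptions, and the optional formatting overrides from syscall_fmts[].
 */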
  1012. struct syscall {
  1013. struct event_format *tp_format;
  1014. int nr_args;
  1015. struct format_field *args;
  1016. const char *name;
  1017. bool is_exit;
  1018. struct syscall_fmt *fmt;
  1019. size_t (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
  1020. void **arg_parm;
  1021. };
  1022. static size_t fprintf_duration(unsigned long t, FILE *fp)
  1023. {
  1024. double duration = (double)t / NSEC_PER_MSEC;
  1025. size_t printed = fprintf(fp, "(");
  1026. if (duration >= 1.0)
  1027. printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
  1028. else if (duration >= 0.01)
  1029. printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
  1030. else
  1031. printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
  1032. return printed + fprintf(fp, "): ");
  1033. }
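/*
 * Per-thread state kept in thread->priv: timestamps of the syscall currently
 * in flight, page fault counters, the fd -> pathname cache and the
 * per-syscall statistics used for the summary.
 */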
  1034. struct thread_trace {
  1035. u64 entry_time;
  1036. u64 exit_time;
  1037. bool entry_pending;
  1038. unsigned long nr_events;
  1039. unsigned long pfmaj, pfmin;
  1040. char *entry_str;
  1041. double runtime_ms;
  1042. struct {
  1043. int max;
  1044. char **table;
  1045. } paths;
  1046. struct intlist *syscall_stats;
  1047. };
  1048. static struct thread_trace *thread_trace__new(void)
  1049. {
  1050. struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
if (ttrace) {
ttrace->paths.max = -1;
ttrace->syscall_stats = intlist__new(NULL); /* inside the check: ttrace may be NULL if zalloc() failed */
}
  1054. return ttrace;
  1055. }
  1056. static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
  1057. {
  1058. struct thread_trace *ttrace;
  1059. if (thread == NULL)
  1060. goto fail;
  1061. if (thread__priv(thread) == NULL)
  1062. thread__set_priv(thread, thread_trace__new());
  1063. if (thread__priv(thread) == NULL)
  1064. goto fail;
  1065. ttrace = thread__priv(thread);
  1066. ++ttrace->nr_events;
  1067. return ttrace;
  1068. fail:
  1069. color_fprintf(fp, PERF_COLOR_RED,
  1070. "WARNING: not enough memory, dropping samples!\n");
  1071. return NULL;
  1072. }
  1073. #define TRACE_PFMAJ (1 << 0)
  1074. #define TRACE_PFMIN (1 << 1)
  1075. struct trace {
  1076. struct perf_tool tool;
  1077. struct {
  1078. int machine;
  1079. int open_id;
  1080. } audit;
  1081. struct {
  1082. int max;
  1083. struct syscall *table;
  1084. struct {
  1085. struct perf_evsel *sys_enter,
  1086. *sys_exit;
  1087. } events;
  1088. } syscalls;
  1089. struct record_opts opts;
  1090. struct perf_evlist *evlist;
  1091. struct machine *host;
  1092. struct thread *current;
  1093. u64 base_time;
  1094. FILE *output;
  1095. unsigned long nr_events;
  1096. struct strlist *ev_qualifier;
  1097. struct {
  1098. size_t nr;
  1099. int *entries;
  1100. } ev_qualifier_ids;
  1101. const char *last_vfs_getname;
  1102. struct intlist *tid_list;
  1103. struct intlist *pid_list;
  1104. struct {
  1105. size_t nr;
  1106. pid_t *entries;
  1107. } filter_pids;
  1108. double duration_filter;
  1109. double runtime_ms;
  1110. struct {
  1111. u64 vfs_getname,
  1112. proc_getname;
  1113. } stats;
  1114. bool not_ev_qualifier;
  1115. bool live;
  1116. bool full_time;
  1117. bool sched;
  1118. bool multiple_threads;
  1119. bool summary;
  1120. bool summary_only;
  1121. bool show_comm;
  1122. bool show_tool_stats;
  1123. bool trace_syscalls;
  1124. bool force;
  1125. int trace_pgfaults;
  1126. };
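/*
 * Grow the per-thread fd -> pathname table on demand and remember the path
 * for 'fd'; entries between the old and new maximum are zeroed so unknown
 * fds read back as NULL.
 */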
  1127. static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
  1128. {
  1129. struct thread_trace *ttrace = thread__priv(thread);
  1130. if (fd > ttrace->paths.max) {
  1131. char **npath = realloc(ttrace->paths.table, (fd + 1) * sizeof(char *));
  1132. if (npath == NULL)
  1133. return -1;
  1134. if (ttrace->paths.max != -1) {
  1135. memset(npath + ttrace->paths.max + 1, 0,
  1136. (fd - ttrace->paths.max) * sizeof(char *));
  1137. } else {
  1138. memset(npath, 0, (fd + 1) * sizeof(char *));
  1139. }
  1140. ttrace->paths.table = npath;
  1141. ttrace->paths.max = fd;
  1142. }
  1143. ttrace->paths.table[fd] = strdup(pathname);
  1144. return ttrace->paths.table[fd] != NULL ? 0 : -1;
  1145. }
  1146. static int thread__read_fd_path(struct thread *thread, int fd)
  1147. {
  1148. char linkname[PATH_MAX], pathname[PATH_MAX];
  1149. struct stat st;
  1150. int ret;
  1151. if (thread->pid_ == thread->tid) {
  1152. scnprintf(linkname, sizeof(linkname),
  1153. "/proc/%d/fd/%d", thread->pid_, fd);
  1154. } else {
  1155. scnprintf(linkname, sizeof(linkname),
  1156. "/proc/%d/task/%d/fd/%d", thread->pid_, thread->tid, fd);
  1157. }
  1158. if (lstat(linkname, &st) < 0 || st.st_size + 1 > (off_t)sizeof(pathname))
  1159. return -1;
  1160. ret = readlink(linkname, pathname, sizeof(pathname));
  1161. if (ret < 0 || ret > st.st_size)
  1162. return -1;
  1163. pathname[ret] = '\0';
  1164. return trace__set_fd_pathname(thread, fd, pathname);
  1165. }
  1166. static const char *thread__fd_path(struct thread *thread, int fd,
  1167. struct trace *trace)
  1168. {
  1169. struct thread_trace *ttrace = thread__priv(thread);
  1170. if (ttrace == NULL)
  1171. return NULL;
  1172. if (fd < 0)
  1173. return NULL;
  1174. if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
  1175. if (!trace->live)
  1176. return NULL;
  1177. ++trace->stats.proc_getname;
  1178. if (thread__read_fd_path(thread, fd))
  1179. return NULL;
  1180. }
  1181. return ttrace->paths.table[fd];
  1182. }
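/*
 * Print a file descriptor argument as the number followed by the cached
 * pathname, when one is known, e.g. "3</etc/passwd>".
 */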
  1183. static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
  1184. struct syscall_arg *arg)
  1185. {
  1186. int fd = arg->val;
  1187. size_t printed = scnprintf(bf, size, "%d", fd);
  1188. const char *path = thread__fd_path(arg->thread, fd, arg->trace);
  1189. if (path)
  1190. printed += scnprintf(bf + printed, size - printed, "<%s>", path);
  1191. return printed;
  1192. }
  1193. static size_t syscall_arg__scnprintf_close_fd(char *bf, size_t size,
  1194. struct syscall_arg *arg)
  1195. {
  1196. int fd = arg->val;
  1197. size_t printed = syscall_arg__scnprintf_fd(bf, size, arg);
  1198. struct thread_trace *ttrace = thread__priv(arg->thread);
  1199. if (ttrace && fd >= 0 && fd <= ttrace->paths.max)
  1200. zfree(&ttrace->paths.table[fd]);
  1201. return printed;
  1202. }
  1203. static bool trace__filter_duration(struct trace *trace, double t)
  1204. {
  1205. return t < (trace->duration_filter * NSEC_PER_MSEC);
  1206. }
  1207. static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
  1208. {
  1209. double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
  1210. return fprintf(fp, "%10.3f ", ts);
  1211. }
  1212. static bool done = false;
  1213. static bool interrupted = false;
  1214. static void sig_handler(int sig)
  1215. {
  1216. done = true;
  1217. interrupted = sig == SIGINT;
  1218. }
  1219. static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
  1220. u64 duration, u64 tstamp, FILE *fp)
  1221. {
  1222. size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
  1223. printed += fprintf_duration(duration, fp);
  1224. if (trace->multiple_threads) {
  1225. if (trace->show_comm)
  1226. printed += fprintf(fp, "%.14s/", thread__comm_str(thread));
  1227. printed += fprintf(fp, "%d ", thread->tid);
  1228. }
  1229. return printed;
  1230. }
  1231. static int trace__process_event(struct trace *trace, struct machine *machine,
  1232. union perf_event *event, struct perf_sample *sample)
  1233. {
  1234. int ret = 0;
  1235. switch (event->header.type) {
  1236. case PERF_RECORD_LOST:
  1237. color_fprintf(trace->output, PERF_COLOR_RED,
  1238. "LOST %" PRIu64 " events!\n", event->lost.lost);
  1239. ret = machine__process_lost_event(machine, event, sample);
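/* no break: the lost event also falls through to machine__process_event() below */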
  1240. default:
  1241. ret = machine__process_event(machine, event, sample);
  1242. break;
  1243. }
  1244. return ret;
  1245. }
  1246. static int trace__tool_process(struct perf_tool *tool,
  1247. union perf_event *event,
  1248. struct perf_sample *sample,
  1249. struct machine *machine)
  1250. {
  1251. struct trace *trace = container_of(tool, struct trace, tool);
  1252. return trace__process_event(trace, machine, event, sample);
  1253. }
  1254. static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
  1255. {
  1256. int err = symbol__init(NULL);
  1257. if (err)
  1258. return err;
  1259. trace->host = machine__new_host();
  1260. if (trace->host == NULL)
  1261. return -ENOMEM;
  1262. err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
  1263. evlist->threads, trace__tool_process, false,
  1264. trace->opts.proc_map_timeout);
  1265. if (err)
  1266. symbol__exit();
  1267. return err;
  1268. }
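/*
 * Pick a pretty printer for each tracepoint argument: the one from the
 * per-syscall fmt table when available, hex for pointer fields, and the
 * default signed decimal otherwise.
 */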
  1269. static int syscall__set_arg_fmts(struct syscall *sc)
  1270. {
  1271. struct format_field *field;
  1272. int idx = 0;
  1273. sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
  1274. if (sc->arg_scnprintf == NULL)
  1275. return -1;
  1276. if (sc->fmt)
  1277. sc->arg_parm = sc->fmt->arg_parm;
  1278. for (field = sc->args; field; field = field->next) {
  1279. if (sc->fmt && sc->fmt->arg_scnprintf[idx])
  1280. sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
  1281. else if (field->flags & FIELD_IS_POINTER)
  1282. sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
  1283. ++idx;
  1284. }
  1285. return 0;
  1286. }
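/*
 * Lazily grow trace->syscalls.table up to id, resolve the syscall name via
 * the audit library, bind the syscalls:sys_enter_<name> tracepoint format
 * (falling back to the fmt alias) and drop the common "nr" field.
 */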
  1287. static int trace__read_syscall_info(struct trace *trace, int id)
  1288. {
  1289. char tp_name[128];
  1290. struct syscall *sc;
  1291. const char *name = audit_syscall_to_name(id, trace->audit.machine);
  1292. if (name == NULL)
  1293. return -1;
  1294. if (id > trace->syscalls.max) {
  1295. struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
  1296. if (nsyscalls == NULL)
  1297. return -1;
  1298. if (trace->syscalls.max != -1) {
  1299. memset(nsyscalls + trace->syscalls.max + 1, 0,
  1300. (id - trace->syscalls.max) * sizeof(*sc));
  1301. } else {
  1302. memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
  1303. }
  1304. trace->syscalls.table = nsyscalls;
  1305. trace->syscalls.max = id;
  1306. }
  1307. sc = trace->syscalls.table + id;
  1308. sc->name = name;
  1309. sc->fmt = syscall_fmt__find(sc->name);
  1310. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
  1311. sc->tp_format = trace_event__tp_format("syscalls", tp_name);
  1312. if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
  1313. snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
  1314. sc->tp_format = trace_event__tp_format("syscalls", tp_name);
  1315. }
  1316. if (sc->tp_format == NULL)
  1317. return -1;
  1318. sc->args = sc->tp_format->format.fields;
  1319. sc->nr_args = sc->tp_format->format.nr_fields;
  1320. /* drop nr field - not relevant here; does not exist on older kernels */
  1321. if (sc->args && strcmp(sc->args->name, "nr") == 0) {
  1322. sc->args = sc->args->next;
  1323. --sc->nr_args;
  1324. }
  1325. sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
  1326. return syscall__set_arg_fmts(sc);
  1327. }
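/*
 * Translate the -e/--expr syscall names into ids, collecting them in
 * ev_qualifier_ids and printing all the names that could not be resolved.
 */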
  1328. static int trace__validate_ev_qualifier(struct trace *trace)
  1329. {
  1330. int err = 0, i;
  1331. struct str_node *pos;
  1332. trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
  1333. trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
  1334. sizeof(trace->ev_qualifier_ids.entries[0]));
  1335. if (trace->ev_qualifier_ids.entries == NULL) {
1336. fputs("Error:\tNot enough memory for allocating the event qualifier ids\n",
  1337. trace->output);
  1338. err = -EINVAL;
  1339. goto out;
  1340. }
  1341. i = 0;
  1342. strlist__for_each(pos, trace->ev_qualifier) {
  1343. const char *sc = pos->s;
  1344. int id = audit_name_to_syscall(sc, trace->audit.machine);
  1345. if (id < 0) {
  1346. if (err == 0) {
  1347. fputs("Error:\tInvalid syscall ", trace->output);
  1348. err = -EINVAL;
  1349. } else {
  1350. fputs(", ", trace->output);
  1351. }
  1352. fputs(sc, trace->output);
  1353. }
  1354. trace->ev_qualifier_ids.entries[i++] = id;
  1355. }
  1356. if (err < 0) {
  1357. fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
  1358. "\nHint:\tand: 'man syscalls'\n", trace->output);
  1359. zfree(&trace->ev_qualifier_ids.entries);
  1360. trace->ev_qualifier_ids.nr = 0;
  1361. }
  1362. out:
  1363. return err;
  1364. }
  1365. /*
  1366. * args is to be interpreted as a series of longs but we need to handle
  1367. * 8-byte unaligned accesses. args points to raw_data within the event
  1368. * and raw_data is guaranteed to be 8-byte unaligned because it is
  1369. * preceded by raw_size which is a u32. So we need to copy args to a temp
  1370. * variable to read it. Most notably this avoids extended load instructions
  1371. * on unaligned addresses
  1372. */
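/*
 * Roughly: ... | u32 raw_size | raw_data ...   <- args points in here.
 * raw_data starts right after the u32, i.e. on a 4-byte boundary, so the
 * longs it holds are read with memcpy() below instead of being dereferenced.
 */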
  1373. static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
  1374. unsigned char *args, struct trace *trace,
  1375. struct thread *thread)
  1376. {
  1377. size_t printed = 0;
  1378. unsigned char *p;
  1379. unsigned long val;
  1380. if (sc->args != NULL) {
  1381. struct format_field *field;
  1382. u8 bit = 1;
  1383. struct syscall_arg arg = {
  1384. .idx = 0,
  1385. .mask = 0,
  1386. .trace = trace,
  1387. .thread = thread,
  1388. };
  1389. for (field = sc->args; field;
  1390. field = field->next, ++arg.idx, bit <<= 1) {
  1391. if (arg.mask & bit)
  1392. continue;
  1393. /* special care for unaligned accesses */
  1394. p = args + sizeof(unsigned long) * arg.idx;
  1395. memcpy(&val, p, sizeof(val));
1396. /*
1397. * Suppress this argument if its value is zero and
1398. * we don't have a string associated with it in a
1399. * strarray.
1400. */
  1401. if (val == 0 &&
  1402. !(sc->arg_scnprintf &&
  1403. sc->arg_scnprintf[arg.idx] == SCA_STRARRAY &&
  1404. sc->arg_parm[arg.idx]))
  1405. continue;
  1406. printed += scnprintf(bf + printed, size - printed,
  1407. "%s%s: ", printed ? ", " : "", field->name);
  1408. if (sc->arg_scnprintf && sc->arg_scnprintf[arg.idx]) {
  1409. arg.val = val;
  1410. if (sc->arg_parm)
  1411. arg.parm = sc->arg_parm[arg.idx];
  1412. printed += sc->arg_scnprintf[arg.idx](bf + printed,
  1413. size - printed, &arg);
  1414. } else {
  1415. printed += scnprintf(bf + printed, size - printed,
  1416. "%ld", val);
  1417. }
  1418. }
  1419. } else {
  1420. int i = 0;
  1421. while (i < 6) {
  1422. /* special care for unaligned accesses */
  1423. p = args + sizeof(unsigned long) * i;
  1424. memcpy(&val, p, sizeof(val));
  1425. printed += scnprintf(bf + printed, size - printed,
  1426. "%sarg%d: %ld",
  1427. printed ? ", " : "", i, val);
  1428. ++i;
  1429. }
  1430. }
  1431. return printed;
  1432. }
  1433. typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
  1434. union perf_event *event,
  1435. struct perf_sample *sample);
  1436. static struct syscall *trace__syscall_info(struct trace *trace,
  1437. struct perf_evsel *evsel, int id)
  1438. {
  1439. if (id < 0) {
  1440. /*
  1441. * XXX: Noticed on x86_64, reproduced as far back as 3.0.36, haven't tried
  1442. * before that, leaving at a higher verbosity level till that is
  1443. * explained. Reproduced with plain ftrace with:
  1444. *
  1445. * echo 1 > /t/events/raw_syscalls/sys_exit/enable
  1446. * grep "NR -1 " /t/trace_pipe
  1447. *
  1448. * After generating some load on the machine.
  1449. */
  1450. if (verbose > 1) {
  1451. static u64 n;
  1452. fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
  1453. id, perf_evsel__name(evsel), ++n);
  1454. }
  1455. return NULL;
  1456. }
  1457. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
  1458. trace__read_syscall_info(trace, id))
  1459. goto out_cant_read;
  1460. if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
  1461. goto out_cant_read;
  1462. return &trace->syscalls.table[id];
  1463. out_cant_read:
  1464. if (verbose) {
  1465. fprintf(trace->output, "Problems reading syscall %d", id);
  1466. if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
  1467. fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
  1468. fputs(" information\n", trace->output);
  1469. }
  1470. return NULL;
  1471. }
  1472. static void thread__update_stats(struct thread_trace *ttrace,
  1473. int id, struct perf_sample *sample)
  1474. {
  1475. struct int_node *inode;
  1476. struct stats *stats;
  1477. u64 duration = 0;
  1478. inode = intlist__findnew(ttrace->syscall_stats, id);
  1479. if (inode == NULL)
  1480. return;
  1481. stats = inode->priv;
  1482. if (stats == NULL) {
  1483. stats = malloc(sizeof(struct stats));
  1484. if (stats == NULL)
  1485. return;
  1486. init_stats(stats);
  1487. inode->priv = stats;
  1488. }
  1489. if (ttrace->entry_time && sample->time > ttrace->entry_time)
  1490. duration = sample->time - ttrace->entry_time;
  1491. update_stats(stats, duration);
  1492. }
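/*
 * Flush a still pending sys_enter line for the current thread, terminating
 * it with ") ..." before some other event gets printed.
 */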
  1493. static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
  1494. {
  1495. struct thread_trace *ttrace;
  1496. u64 duration;
  1497. size_t printed;
  1498. if (trace->current == NULL)
  1499. return 0;
  1500. ttrace = thread__priv(trace->current);
  1501. if (!ttrace->entry_pending)
  1502. return 0;
  1503. duration = sample->time - ttrace->entry_time;
  1504. printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
  1505. printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
  1506. ttrace->entry_pending = false;
  1507. return printed;
  1508. }
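/*
 * raw_syscalls:sys_enter handler: format "name(args" into ttrace->entry_str.
 * Exit-like syscalls (exit, exit_group) are printed right away since no
 * matching sys_exit will arrive; everything else waits for trace__sys_exit().
 */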
  1509. static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
  1510. union perf_event *event __maybe_unused,
  1511. struct perf_sample *sample)
  1512. {
  1513. char *msg;
  1514. void *args;
  1515. size_t printed = 0;
  1516. struct thread *thread;
  1517. int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
  1518. struct syscall *sc = trace__syscall_info(trace, evsel, id);
  1519. struct thread_trace *ttrace;
  1520. if (sc == NULL)
  1521. return -1;
  1522. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1523. ttrace = thread__trace(thread, trace->output);
  1524. if (ttrace == NULL)
  1525. goto out_put;
  1526. args = perf_evsel__sc_tp_ptr(evsel, args, sample);
  1527. if (ttrace->entry_str == NULL) {
  1528. ttrace->entry_str = malloc(1024);
  1529. if (!ttrace->entry_str)
  1530. goto out_put;
  1531. }
  1532. if (!trace->summary_only)
  1533. trace__printf_interrupted_entry(trace, sample);
  1534. ttrace->entry_time = sample->time;
  1535. msg = ttrace->entry_str;
  1536. printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
  1537. printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,
  1538. args, trace, thread);
  1539. if (sc->is_exit) {
  1540. if (!trace->duration_filter && !trace->summary_only) {
  1541. trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
  1542. fprintf(trace->output, "%-70s\n", ttrace->entry_str);
  1543. }
  1544. } else
  1545. ttrace->entry_pending = true;
  1546. if (trace->current != thread) {
  1547. thread__put(trace->current);
  1548. trace->current = thread__get(thread);
  1549. }
  1550. err = 0;
  1551. out_put:
  1552. thread__put(thread);
  1553. return err;
  1554. }
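/*
 * raw_syscalls:sys_exit handler: pair up with the pending entry, apply the
 * duration filter and decode the return value (errno name, "Timeout", hex
 * or plain signed decimal, depending on the syscall fmt).
 */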
  1555. static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
  1556. union perf_event *event __maybe_unused,
  1557. struct perf_sample *sample)
  1558. {
  1559. long ret;
  1560. u64 duration = 0;
  1561. struct thread *thread;
  1562. int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
  1563. struct syscall *sc = trace__syscall_info(trace, evsel, id);
  1564. struct thread_trace *ttrace;
  1565. if (sc == NULL)
  1566. return -1;
  1567. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1568. ttrace = thread__trace(thread, trace->output);
  1569. if (ttrace == NULL)
  1570. goto out_put;
  1571. if (trace->summary)
  1572. thread__update_stats(ttrace, id, sample);
  1573. ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
  1574. if (id == trace->audit.open_id && ret >= 0 && trace->last_vfs_getname) {
  1575. trace__set_fd_pathname(thread, ret, trace->last_vfs_getname);
  1576. trace->last_vfs_getname = NULL;
  1577. ++trace->stats.vfs_getname;
  1578. }
  1579. ttrace->exit_time = sample->time;
  1580. if (ttrace->entry_time) {
  1581. duration = sample->time - ttrace->entry_time;
  1582. if (trace__filter_duration(trace, duration))
  1583. goto out;
  1584. } else if (trace->duration_filter)
  1585. goto out;
  1586. if (trace->summary_only)
  1587. goto out;
  1588. trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
  1589. if (ttrace->entry_pending) {
  1590. fprintf(trace->output, "%-70s", ttrace->entry_str);
  1591. } else {
  1592. fprintf(trace->output, " ... [");
  1593. color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
  1594. fprintf(trace->output, "]: %s()", sc->name);
  1595. }
  1596. if (sc->fmt == NULL) {
  1597. signed_print:
  1598. fprintf(trace->output, ") = %ld", ret);
  1599. } else if (ret < 0 && sc->fmt->errmsg) {
  1600. char bf[STRERR_BUFSIZE];
  1601. const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
  1602. *e = audit_errno_to_name(-ret);
  1603. fprintf(trace->output, ") = -1 %s %s", e, emsg);
  1604. } else if (ret == 0 && sc->fmt->timeout)
  1605. fprintf(trace->output, ") = 0 Timeout");
  1606. else if (sc->fmt->hexret)
  1607. fprintf(trace->output, ") = %#lx", ret);
  1608. else
  1609. goto signed_print;
  1610. fputc('\n', trace->output);
  1611. out:
  1612. ttrace->entry_pending = false;
  1613. err = 0;
  1614. out_put:
  1615. thread__put(thread);
  1616. return err;
  1617. }
  1618. static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
  1619. union perf_event *event __maybe_unused,
  1620. struct perf_sample *sample)
  1621. {
  1622. trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname");
  1623. return 0;
  1624. }
  1625. static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
  1626. union perf_event *event __maybe_unused,
  1627. struct perf_sample *sample)
  1628. {
  1629. u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
  1630. double runtime_ms = (double)runtime / NSEC_PER_MSEC;
  1631. struct thread *thread = machine__findnew_thread(trace->host,
  1632. sample->pid,
  1633. sample->tid);
  1634. struct thread_trace *ttrace = thread__trace(thread, trace->output);
  1635. if (ttrace == NULL)
  1636. goto out_dump;
  1637. ttrace->runtime_ms += runtime_ms;
  1638. trace->runtime_ms += runtime_ms;
  1639. thread__put(thread);
  1640. return 0;
  1641. out_dump:
1642. fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 "\n",
  1643. evsel->name,
  1644. perf_evsel__strval(evsel, sample, "comm"),
  1645. (pid_t)perf_evsel__intval(evsel, sample, "pid"),
  1646. runtime,
  1647. perf_evsel__intval(evsel, sample, "vruntime"));
  1648. thread__put(thread);
  1649. return 0;
  1650. }
  1651. static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
  1652. union perf_event *event __maybe_unused,
  1653. struct perf_sample *sample)
  1654. {
  1655. trace__printf_interrupted_entry(trace, sample);
  1656. trace__fprintf_tstamp(trace, sample->time, trace->output);
  1657. if (trace->trace_syscalls)
  1658. fprintf(trace->output, "( ): ");
  1659. fprintf(trace->output, "%s:", evsel->name);
  1660. if (evsel->tp_format) {
  1661. event_format__fprintf(evsel->tp_format, sample->cpu,
  1662. sample->raw_data, sample->raw_size,
  1663. trace->output);
  1664. }
  1665. fprintf(trace->output, ")\n");
  1666. return 0;
  1667. }
  1668. static void print_location(FILE *f, struct perf_sample *sample,
  1669. struct addr_location *al,
  1670. bool print_dso, bool print_sym)
  1671. {
  1672. if ((verbose || print_dso) && al->map)
  1673. fprintf(f, "%s@", al->map->dso->long_name);
  1674. if ((verbose || print_sym) && al->sym)
  1675. fprintf(f, "%s+0x%" PRIx64, al->sym->name,
  1676. al->addr - al->sym->start);
  1677. else if (al->map)
  1678. fprintf(f, "0x%" PRIx64, al->addr);
  1679. else
  1680. fprintf(f, "0x%" PRIx64, sample->addr);
  1681. }
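/*
 * Software page fault events: bump the per-thread maj/min counters and,
 * unless only a summary was asked for, print the faulting IP and the data
 * address with their map/symbol, when resolvable.
 */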
  1682. static int trace__pgfault(struct trace *trace,
  1683. struct perf_evsel *evsel,
  1684. union perf_event *event,
  1685. struct perf_sample *sample)
  1686. {
  1687. struct thread *thread;
  1688. u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
  1689. struct addr_location al;
  1690. char map_type = 'd';
  1691. struct thread_trace *ttrace;
  1692. int err = -1;
  1693. thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
  1694. ttrace = thread__trace(thread, trace->output);
  1695. if (ttrace == NULL)
  1696. goto out_put;
  1697. if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
  1698. ttrace->pfmaj++;
  1699. else
  1700. ttrace->pfmin++;
  1701. if (trace->summary_only)
  1702. goto out;
  1703. thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
  1704. sample->ip, &al);
  1705. trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
  1706. fprintf(trace->output, "%sfault [",
  1707. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
  1708. "maj" : "min");
  1709. print_location(trace->output, sample, &al, false, true);
  1710. fprintf(trace->output, "] => ");
  1711. thread__find_addr_location(thread, cpumode, MAP__VARIABLE,
  1712. sample->addr, &al);
  1713. if (!al.map) {
  1714. thread__find_addr_location(thread, cpumode,
  1715. MAP__FUNCTION, sample->addr, &al);
  1716. if (al.map)
  1717. map_type = 'x';
  1718. else
  1719. map_type = '?';
  1720. }
  1721. print_location(trace->output, sample, &al, true, false);
  1722. fprintf(trace->output, " (%c%c)\n", map_type, al.level);
  1723. out:
  1724. err = 0;
  1725. out_put:
  1726. thread__put(thread);
  1727. return err;
  1728. }
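/* Replay mode only: drop samples not matching the -p/-t pid/tid lists. */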
  1729. static bool skip_sample(struct trace *trace, struct perf_sample *sample)
  1730. {
  1731. if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
  1732. (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
  1733. return false;
  1734. if (trace->pid_list || trace->tid_list)
  1735. return true;
  1736. return false;
  1737. }
  1738. static int trace__process_sample(struct perf_tool *tool,
  1739. union perf_event *event,
  1740. struct perf_sample *sample,
  1741. struct perf_evsel *evsel,
  1742. struct machine *machine __maybe_unused)
  1743. {
  1744. struct trace *trace = container_of(tool, struct trace, tool);
  1745. int err = 0;
  1746. tracepoint_handler handler = evsel->handler;
  1747. if (skip_sample(trace, sample))
  1748. return 0;
  1749. if (!trace->full_time && trace->base_time == 0)
  1750. trace->base_time = sample->time;
  1751. if (handler) {
  1752. ++trace->nr_events;
  1753. handler(trace, evsel, event, sample);
  1754. }
  1755. return err;
  1756. }
  1757. static int parse_target_str(struct trace *trace)
  1758. {
  1759. if (trace->opts.target.pid) {
  1760. trace->pid_list = intlist__new(trace->opts.target.pid);
  1761. if (trace->pid_list == NULL) {
  1762. pr_err("Error parsing process id string\n");
  1763. return -EINVAL;
  1764. }
  1765. }
  1766. if (trace->opts.target.tid) {
  1767. trace->tid_list = intlist__new(trace->opts.target.tid);
  1768. if (trace->tid_list == NULL) {
  1769. pr_err("Error parsing thread id string\n");
  1770. return -EINVAL;
  1771. }
  1772. }
  1773. return 0;
  1774. }
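/*
 * 'perf trace record': build an equivalent 'perf record' command line
 * (raw_syscalls/syscalls tracepoints plus the requested page fault events)
 * and hand it over to cmd_record().
 */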
  1775. static int trace__record(struct trace *trace, int argc, const char **argv)
  1776. {
  1777. unsigned int rec_argc, i, j;
  1778. const char **rec_argv;
  1779. const char * const record_args[] = {
  1780. "record",
  1781. "-R",
  1782. "-m", "1024",
  1783. "-c", "1",
  1784. };
  1785. const char * const sc_args[] = { "-e", };
  1786. unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
  1787. const char * const majpf_args[] = { "-e", "major-faults" };
  1788. unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
  1789. const char * const minpf_args[] = { "-e", "minor-faults" };
  1790. unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
  1791. /* +1 is for the event string below */
  1792. rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
  1793. majpf_args_nr + minpf_args_nr + argc;
  1794. rec_argv = calloc(rec_argc + 1, sizeof(char *));
  1795. if (rec_argv == NULL)
  1796. return -ENOMEM;
  1797. j = 0;
  1798. for (i = 0; i < ARRAY_SIZE(record_args); i++)
  1799. rec_argv[j++] = record_args[i];
  1800. if (trace->trace_syscalls) {
  1801. for (i = 0; i < sc_args_nr; i++)
  1802. rec_argv[j++] = sc_args[i];
  1803. /* event string may be different for older kernels - e.g., RHEL6 */
  1804. if (is_valid_tracepoint("raw_syscalls:sys_enter"))
  1805. rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
  1806. else if (is_valid_tracepoint("syscalls:sys_enter"))
  1807. rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
  1808. else {
  1809. pr_err("Neither raw_syscalls nor syscalls events exist.\n");
  1810. return -1;
  1811. }
  1812. }
  1813. if (trace->trace_pgfaults & TRACE_PFMAJ)
  1814. for (i = 0; i < majpf_args_nr; i++)
  1815. rec_argv[j++] = majpf_args[i];
  1816. if (trace->trace_pgfaults & TRACE_PFMIN)
  1817. for (i = 0; i < minpf_args_nr; i++)
  1818. rec_argv[j++] = minpf_args[i];
  1819. for (i = 0; i < (unsigned int)argc; i++)
  1820. rec_argv[j++] = argv[i];
  1821. return cmd_record(j, rec_argv, NULL);
  1822. }
  1823. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
  1824. static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
  1825. {
  1826. struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
  1827. if (evsel == NULL)
  1828. return;
  1829. if (perf_evsel__field(evsel, "pathname") == NULL) {
  1830. perf_evsel__delete(evsel);
  1831. return;
  1832. }
  1833. evsel->handler = trace__vfs_getname;
  1834. perf_evlist__add(evlist, evsel);
  1835. }
  1836. static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
  1837. u64 config)
  1838. {
  1839. struct perf_evsel *evsel;
  1840. struct perf_event_attr attr = {
  1841. .type = PERF_TYPE_SOFTWARE,
  1842. .mmap_data = 1,
  1843. };
  1844. attr.config = config;
  1845. attr.sample_period = 1;
  1846. event_attr_init(&attr);
  1847. evsel = perf_evsel__new(&attr);
  1848. if (!evsel)
  1849. return -ENOMEM;
  1850. evsel->handler = trace__pgfault;
  1851. perf_evlist__add(evlist, evsel);
  1852. return 0;
  1853. }
  1854. static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
  1855. {
  1856. const u32 type = event->header.type;
  1857. struct perf_evsel *evsel;
  1858. if (!trace->full_time && trace->base_time == 0)
  1859. trace->base_time = sample->time;
  1860. if (type != PERF_RECORD_SAMPLE) {
  1861. trace__process_event(trace, trace->host, event, sample);
  1862. return;
  1863. }
  1864. evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
  1865. if (evsel == NULL) {
  1866. fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
  1867. return;
  1868. }
  1869. if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
  1870. sample->raw_data == NULL) {
  1871. fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
  1872. perf_evsel__name(evsel), sample->tid,
  1873. sample->cpu, sample->raw_size);
  1874. } else {
  1875. tracepoint_handler handler = evsel->handler;
  1876. handler(trace, evsel, event, sample);
  1877. }
  1878. }
  1879. static int trace__add_syscall_newtp(struct trace *trace)
  1880. {
  1881. int ret = -1;
  1882. struct perf_evlist *evlist = trace->evlist;
  1883. struct perf_evsel *sys_enter, *sys_exit;
  1884. sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
  1885. if (sys_enter == NULL)
  1886. goto out;
  1887. if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
  1888. goto out_delete_sys_enter;
  1889. sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
  1890. if (sys_exit == NULL)
  1891. goto out_delete_sys_enter;
  1892. if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
  1893. goto out_delete_sys_exit;
  1894. perf_evlist__add(evlist, sys_enter);
  1895. perf_evlist__add(evlist, sys_exit);
  1896. trace->syscalls.events.sys_enter = sys_enter;
  1897. trace->syscalls.events.sys_exit = sys_exit;
  1898. ret = 0;
  1899. out:
  1900. return ret;
  1901. out_delete_sys_exit:
  1902. perf_evsel__delete_priv(sys_exit);
  1903. out_delete_sys_enter:
  1904. perf_evsel__delete_priv(sys_enter);
  1905. goto out;
  1906. }
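/*
 * Turn the validated syscall id list into a filter expression on the "id"
 * tracepoint field (negated when the qualifier started with '!') and append
 * it to both the sys_enter and sys_exit evsels.
 */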
  1907. static int trace__set_ev_qualifier_filter(struct trace *trace)
  1908. {
  1909. int err = -1;
  1910. char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
  1911. trace->ev_qualifier_ids.nr,
  1912. trace->ev_qualifier_ids.entries);
  1913. if (filter == NULL)
  1914. goto out_enomem;
  1915. if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
  1916. err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);
  1917. free(filter);
  1918. out:
  1919. return err;
  1920. out_enomem:
  1921. errno = ENOMEM;
  1922. goto out;
  1923. }
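/*
 * Live mode: add the requested evsels (syscalls, vfs_getname, page faults,
 * sched_stat_runtime), start the workload if one was given, then mmap the
 * ring buffers and consume events until done or interrupted.
 */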
  1924. static int trace__run(struct trace *trace, int argc, const char **argv)
  1925. {
  1926. struct perf_evlist *evlist = trace->evlist;
  1927. struct perf_evsel *evsel;
  1928. int err = -1, i;
  1929. unsigned long before;
  1930. const bool forks = argc > 0;
  1931. bool draining = false;
  1932. trace->live = true;
  1933. if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
  1934. goto out_error_raw_syscalls;
  1935. if (trace->trace_syscalls)
  1936. perf_evlist__add_vfs_getname(evlist);
  1937. if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
  1938. perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
  1939. goto out_error_mem;
  1940. }
  1941. if ((trace->trace_pgfaults & TRACE_PFMIN) &&
  1942. perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
  1943. goto out_error_mem;
  1944. if (trace->sched &&
  1945. perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
  1946. trace__sched_stat_runtime))
  1947. goto out_error_sched_stat_runtime;
  1948. err = perf_evlist__create_maps(evlist, &trace->opts.target);
  1949. if (err < 0) {
  1950. fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
  1951. goto out_delete_evlist;
  1952. }
  1953. err = trace__symbols_init(trace, evlist);
  1954. if (err < 0) {
  1955. fprintf(trace->output, "Problems initializing symbol libraries!\n");
  1956. goto out_delete_evlist;
  1957. }
  1958. perf_evlist__config(evlist, &trace->opts);
  1959. signal(SIGCHLD, sig_handler);
  1960. signal(SIGINT, sig_handler);
  1961. if (forks) {
  1962. err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
  1963. argv, false, NULL);
  1964. if (err < 0) {
  1965. fprintf(trace->output, "Couldn't run the workload!\n");
  1966. goto out_delete_evlist;
  1967. }
  1968. }
  1969. err = perf_evlist__open(evlist);
  1970. if (err < 0)
  1971. goto out_error_open;
  1972. /*
  1973. * Better not use !target__has_task() here because we need to cover the
  1974. * case where no threads were specified in the command line, but a
  1975. * workload was, and in that case we will fill in the thread_map when
  1976. * we fork the workload in perf_evlist__prepare_workload.
  1977. */
  1978. if (trace->filter_pids.nr > 0)
  1979. err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
  1980. else if (thread_map__pid(evlist->threads, 0) == -1)
  1981. err = perf_evlist__set_filter_pid(evlist, getpid());
  1982. if (err < 0)
  1983. goto out_error_mem;
  1984. if (trace->ev_qualifier_ids.nr > 0) {
  1985. err = trace__set_ev_qualifier_filter(trace);
  1986. if (err < 0)
  1987. goto out_errno;
  1988. }
  1989. pr_debug("%s\n", trace->syscalls.events.sys_exit->filter);
  1990. err = perf_evlist__apply_filters(evlist, &evsel);
  1991. if (err < 0)
  1992. goto out_error_apply_filters;
  1993. err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
  1994. if (err < 0)
  1995. goto out_error_mmap;
  1996. if (!target__none(&trace->opts.target))
  1997. perf_evlist__enable(evlist);
  1998. if (forks)
  1999. perf_evlist__start_workload(evlist);
  2000. trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
  2001. evlist->threads->nr > 1 ||
  2002. perf_evlist__first(evlist)->attr.inherit;
  2003. again:
  2004. before = trace->nr_events;
  2005. for (i = 0; i < evlist->nr_mmaps; i++) {
  2006. union perf_event *event;
  2007. while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
  2008. struct perf_sample sample;
  2009. ++trace->nr_events;
  2010. err = perf_evlist__parse_sample(evlist, event, &sample);
  2011. if (err) {
  2012. fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
  2013. goto next_event;
  2014. }
  2015. trace__handle_event(trace, event, &sample);
  2016. next_event:
  2017. perf_evlist__mmap_consume(evlist, i);
  2018. if (interrupted)
  2019. goto out_disable;
  2020. if (done && !draining) {
  2021. perf_evlist__disable(evlist);
  2022. draining = true;
  2023. }
  2024. }
  2025. }
  2026. if (trace->nr_events == before) {
  2027. int timeout = done ? 100 : -1;
  2028. if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
  2029. if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
  2030. draining = true;
  2031. goto again;
  2032. }
  2033. } else {
  2034. goto again;
  2035. }
  2036. out_disable:
  2037. thread__zput(trace->current);
  2038. perf_evlist__disable(evlist);
  2039. if (!err) {
  2040. if (trace->summary)
  2041. trace__fprintf_thread_summary(trace, trace->output);
  2042. if (trace->show_tool_stats) {
  2043. fprintf(trace->output, "Stats:\n "
  2044. " vfs_getname : %" PRIu64 "\n"
  2045. " proc_getname: %" PRIu64 "\n",
  2046. trace->stats.vfs_getname,
  2047. trace->stats.proc_getname);
  2048. }
  2049. }
  2050. out_delete_evlist:
  2051. perf_evlist__delete(evlist);
  2052. trace->evlist = NULL;
  2053. trace->live = false;
  2054. return err;
  2055. {
  2056. char errbuf[BUFSIZ];
  2057. out_error_sched_stat_runtime:
  2058. debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
  2059. goto out_error;
  2060. out_error_raw_syscalls:
  2061. debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
  2062. goto out_error;
  2063. out_error_mmap:
  2064. perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
  2065. goto out_error;
  2066. out_error_open:
  2067. perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
  2068. out_error:
  2069. fprintf(trace->output, "%s\n", errbuf);
  2070. goto out_delete_evlist;
  2071. out_error_apply_filters:
  2072. fprintf(trace->output,
  2073. "Failed to set filter \"%s\" on event %s with %d (%s)\n",
  2074. evsel->filter, perf_evsel__name(evsel), errno,
  2075. strerror_r(errno, errbuf, sizeof(errbuf)));
  2076. goto out_delete_evlist;
  2077. }
  2078. out_error_mem:
  2079. fprintf(trace->output, "Not enough memory to run!\n");
  2080. goto out_delete_evlist;
  2081. out_errno:
  2082. fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
  2083. goto out_delete_evlist;
  2084. }
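/*
 * 'perf trace -i <file>': process an existing perf.data through the same
 * sys_enter/sys_exit/page fault handlers used in live mode.
 */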
  2085. static int trace__replay(struct trace *trace)
  2086. {
  2087. const struct perf_evsel_str_handler handlers[] = {
  2088. { "probe:vfs_getname", trace__vfs_getname, },
  2089. };
  2090. struct perf_data_file file = {
  2091. .path = input_name,
  2092. .mode = PERF_DATA_MODE_READ,
  2093. .force = trace->force,
  2094. };
  2095. struct perf_session *session;
  2096. struct perf_evsel *evsel;
  2097. int err = -1;
  2098. trace->tool.sample = trace__process_sample;
  2099. trace->tool.mmap = perf_event__process_mmap;
  2100. trace->tool.mmap2 = perf_event__process_mmap2;
  2101. trace->tool.comm = perf_event__process_comm;
  2102. trace->tool.exit = perf_event__process_exit;
  2103. trace->tool.fork = perf_event__process_fork;
  2104. trace->tool.attr = perf_event__process_attr;
  2105. trace->tool.tracing_data = perf_event__process_tracing_data;
  2106. trace->tool.build_id = perf_event__process_build_id;
  2107. trace->tool.ordered_events = true;
  2108. trace->tool.ordering_requires_timestamps = true;
  2109. /* add tid to output */
  2110. trace->multiple_threads = true;
  2111. session = perf_session__new(&file, false, &trace->tool);
  2112. if (session == NULL)
  2113. return -1;
  2114. if (symbol__init(&session->header.env) < 0)
  2115. goto out;
  2116. trace->host = &session->machines.host;
  2117. err = perf_session__set_tracepoints_handlers(session, handlers);
  2118. if (err)
  2119. goto out;
  2120. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2121. "raw_syscalls:sys_enter");
  2122. /* older kernels have syscalls tp versus raw_syscalls */
  2123. if (evsel == NULL)
  2124. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2125. "syscalls:sys_enter");
  2126. if (evsel &&
  2127. (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
  2128. perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
2129. pr_err("Error initializing raw_syscalls:sys_enter event\n");
  2130. goto out;
  2131. }
  2132. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2133. "raw_syscalls:sys_exit");
  2134. if (evsel == NULL)
  2135. evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
  2136. "syscalls:sys_exit");
  2137. if (evsel &&
  2138. (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
  2139. perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
2140. pr_err("Error initializing raw_syscalls:sys_exit event\n");
  2141. goto out;
  2142. }
  2143. evlist__for_each(session->evlist, evsel) {
  2144. if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
  2145. (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
  2146. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
  2147. evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
  2148. evsel->handler = trace__pgfault;
  2149. }
  2150. err = parse_target_str(trace);
  2151. if (err != 0)
  2152. goto out;
  2153. setup_pager();
  2154. err = perf_session__process_events(session);
  2155. if (err)
  2156. pr_err("Failed to process events, error %d", err);
  2157. else if (trace->summary)
  2158. trace__fprintf_thread_summary(trace, trace->output);
  2159. out:
  2160. perf_session__delete(session);
  2161. return err;
  2162. }
  2163. static size_t trace__fprintf_threads_header(FILE *fp)
  2164. {
  2165. size_t printed;
  2166. printed = fprintf(fp, "\n Summary of events:\n\n");
  2167. return printed;
  2168. }
  2169. static size_t thread__dump_stats(struct thread_trace *ttrace,
  2170. struct trace *trace, FILE *fp)
  2171. {
  2172. struct stats *stats;
  2173. size_t printed = 0;
  2174. struct syscall *sc;
  2175. struct int_node *inode = intlist__first(ttrace->syscall_stats);
  2176. if (inode == NULL)
  2177. return 0;
  2178. printed += fprintf(fp, "\n");
2179. printed += fprintf(fp, " syscall            calls       min       avg       max stddev\n");
2180. printed += fprintf(fp, "                             (msec)    (msec)    (msec)    (%%)\n");
  2181. printed += fprintf(fp, " --------------- -------- --------- --------- --------- ------\n");
  2182. /* each int_node is a syscall */
  2183. while (inode) {
  2184. stats = inode->priv;
  2185. if (stats) {
  2186. double min = (double)(stats->min) / NSEC_PER_MSEC;
  2187. double max = (double)(stats->max) / NSEC_PER_MSEC;
  2188. double avg = avg_stats(stats);
  2189. double pct;
  2190. u64 n = (u64) stats->n;
  2191. pct = avg ? 100.0 * stddev_stats(stats)/avg : 0.0;
  2192. avg /= NSEC_PER_MSEC;
  2193. sc = &trace->syscalls.table[inode->i];
  2194. printed += fprintf(fp, " %-15s", sc->name);
  2195. printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f",
  2196. n, min, avg);
  2197. printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
  2198. }
  2199. inode = intlist__next(inode);
  2200. }
  2201. printed += fprintf(fp, "\n\n");
  2202. return printed;
  2203. }
  2204. /* struct used to pass data to per-thread function */
  2205. struct summary_data {
  2206. FILE *fp;
  2207. struct trace *trace;
  2208. size_t printed;
  2209. };
  2210. static int trace__fprintf_one_thread(struct thread *thread, void *priv)
  2211. {
  2212. struct summary_data *data = priv;
  2213. FILE *fp = data->fp;
  2214. size_t printed = data->printed;
  2215. struct trace *trace = data->trace;
  2216. struct thread_trace *ttrace = thread__priv(thread);
  2217. double ratio;
  2218. if (ttrace == NULL)
  2219. return 0;
  2220. ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
  2221. printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
  2222. printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
  2223. printed += fprintf(fp, "%.1f%%", ratio);
  2224. if (ttrace->pfmaj)
  2225. printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
  2226. if (ttrace->pfmin)
  2227. printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
  2228. printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
  2229. printed += thread__dump_stats(ttrace, trace, fp);
  2230. data->printed += printed;
  2231. return 0;
  2232. }
  2233. static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
  2234. {
  2235. struct summary_data data = {
  2236. .fp = fp,
  2237. .trace = trace
  2238. };
  2239. data.printed = trace__fprintf_threads_header(fp);
  2240. machine__for_each_thread(trace->host, trace__fprintf_one_thread, &data);
  2241. return data.printed;
  2242. }
  2243. static int trace__set_duration(const struct option *opt, const char *str,
  2244. int unset __maybe_unused)
  2245. {
  2246. struct trace *trace = opt->value;
  2247. trace->duration_filter = atof(str);
  2248. return 0;
  2249. }
  2250. static int trace__set_filter_pids(const struct option *opt, const char *str,
  2251. int unset __maybe_unused)
  2252. {
  2253. int ret = -1;
  2254. size_t i;
  2255. struct trace *trace = opt->value;
  2256. /*
2257. * FIXME: introduce an intarray class, parse the csv and create a
  2258. * { int nr, int entries[] } struct...
  2259. */
  2260. struct intlist *list = intlist__new(str);
  2261. if (list == NULL)
  2262. return -1;
  2263. i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
  2264. trace->filter_pids.entries = calloc(i, sizeof(pid_t));
  2265. if (trace->filter_pids.entries == NULL)
  2266. goto out;
  2267. trace->filter_pids.entries[0] = getpid();
  2268. for (i = 1; i < trace->filter_pids.nr; ++i)
  2269. trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
  2270. intlist__delete(list);
  2271. ret = 0;
  2272. out:
  2273. return ret;
  2274. }
  2275. static int trace__open_output(struct trace *trace, const char *filename)
  2276. {
  2277. struct stat st;
  2278. if (!stat(filename, &st) && st.st_size) {
  2279. char oldname[PATH_MAX];
  2280. scnprintf(oldname, sizeof(oldname), "%s.old", filename);
  2281. unlink(oldname);
  2282. rename(filename, oldname);
  2283. }
  2284. trace->output = fopen(filename, "w");
  2285. return trace->output == NULL ? -errno : 0;
  2286. }
  2287. static int parse_pagefaults(const struct option *opt, const char *str,
  2288. int unset __maybe_unused)
  2289. {
  2290. int *trace_pgfaults = opt->value;
  2291. if (strcmp(str, "all") == 0)
  2292. *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
  2293. else if (strcmp(str, "maj") == 0)
  2294. *trace_pgfaults |= TRACE_PFMAJ;
  2295. else if (strcmp(str, "min") == 0)
  2296. *trace_pgfaults |= TRACE_PFMIN;
  2297. else
  2298. return -1;
  2299. return 0;
  2300. }
  2301. static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
  2302. {
  2303. struct perf_evsel *evsel;
  2304. evlist__for_each(evlist, evsel)
  2305. evsel->handler = handler;
  2306. }
  2307. int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
  2308. {
  2309. const char *trace_usage[] = {
  2310. "perf trace [<options>] [<command>]",
  2311. "perf trace [<options>] -- <command> [<options>]",
  2312. "perf trace record [<options>] [<command>]",
  2313. "perf trace record [<options>] -- <command> [<options>]",
  2314. NULL
  2315. };
  2316. struct trace trace = {
  2317. .audit = {
  2318. .machine = audit_detect_machine(),
  2319. .open_id = audit_name_to_syscall("open", trace.audit.machine),
  2320. },
  2321. .syscalls = {
2322. .max = -1,
  2323. },
  2324. .opts = {
  2325. .target = {
  2326. .uid = UINT_MAX,
  2327. .uses_mmap = true,
  2328. },
  2329. .user_freq = UINT_MAX,
  2330. .user_interval = ULLONG_MAX,
  2331. .no_buffering = true,
  2332. .mmap_pages = UINT_MAX,
  2333. .proc_map_timeout = 500,
  2334. },
  2335. .output = stdout,
  2336. .show_comm = true,
  2337. .trace_syscalls = true,
  2338. };
  2339. const char *output_name = NULL;
  2340. const char *ev_qualifier_str = NULL;
  2341. const struct option trace_options[] = {
  2342. OPT_CALLBACK(0, "event", &trace.evlist, "event",
  2343. "event selector. use 'perf list' to list available events",
  2344. parse_events_option),
  2345. OPT_BOOLEAN(0, "comm", &trace.show_comm,
  2346. "show the thread COMM next to its id"),
  2347. OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
  2348. OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
  2349. OPT_STRING('o', "output", &output_name, "file", "output file name"),
  2350. OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
  2351. OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
  2352. "trace events on existing process id"),
  2353. OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
  2354. "trace events on existing thread id"),
  2355. OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
  2356. "pids to filter (by the kernel)", trace__set_filter_pids),
  2357. OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
  2358. "system-wide collection from all CPUs"),
  2359. OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
  2360. "list of cpus to monitor"),
  2361. OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
  2362. "child tasks do not inherit counters"),
  2363. OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
  2364. "number of mmap data pages",
  2365. perf_evlist__parse_mmap_pages),
  2366. OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
  2367. "user to profile"),
  2368. OPT_CALLBACK(0, "duration", &trace, "float",
  2369. "show only events with duration > N.M ms",
  2370. trace__set_duration),
  2371. OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
  2372. OPT_INCR('v', "verbose", &verbose, "be more verbose"),
  2373. OPT_BOOLEAN('T', "time", &trace.full_time,
  2374. "Show full timestamp, not time relative to first start"),
  2375. OPT_BOOLEAN('s', "summary", &trace.summary_only,
  2376. "Show only syscall summary with statistics"),
  2377. OPT_BOOLEAN('S', "with-summary", &trace.summary,
  2378. "Show all syscalls and summary with statistics"),
  2379. OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
  2380. "Trace pagefaults", parse_pagefaults, "maj"),
  2381. OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
  2382. OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
  2383. OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
  2384. "per thread proc mmap processing timeout in ms"),
  2385. OPT_END()
  2386. };
  2387. const char * const trace_subcommands[] = { "record", NULL };
  2388. int err;
  2389. char bf[BUFSIZ];
  2390. signal(SIGSEGV, sighandler_dump_stack);
  2391. signal(SIGFPE, sighandler_dump_stack);
  2392. trace.evlist = perf_evlist__new();
  2393. if (trace.evlist == NULL) {
  2394. pr_err("Not enough memory to run!\n");
  2395. err = -ENOMEM;
  2396. goto out;
  2397. }
  2398. argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
  2399. trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
  2400. if (trace.trace_pgfaults) {
  2401. trace.opts.sample_address = true;
  2402. trace.opts.sample_time = true;
  2403. }
  2404. if (trace.evlist->nr_entries > 0)
  2405. evlist__set_evsel_handler(trace.evlist, trace__event_handler);
  2406. if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
  2407. return trace__record(&trace, argc-1, &argv[1]);
  2408. /* summary_only implies summary option, but don't overwrite summary if set */
  2409. if (trace.summary_only)
  2410. trace.summary = trace.summary_only;
  2411. if (!trace.trace_syscalls && !trace.trace_pgfaults &&
2412. trace.evlist->nr_entries == 0 /* Was --event used? */) {
  2413. pr_err("Please specify something to trace.\n");
  2414. return -1;
  2415. }
  2416. if (output_name != NULL) {
  2417. err = trace__open_output(&trace, output_name);
  2418. if (err < 0) {
  2419. perror("failed to create output file");
  2420. goto out;
  2421. }
  2422. }
  2423. if (ev_qualifier_str != NULL) {
  2424. const char *s = ev_qualifier_str;
  2425. trace.not_ev_qualifier = *s == '!';
  2426. if (trace.not_ev_qualifier)
  2427. ++s;
  2428. trace.ev_qualifier = strlist__new(true, s);
  2429. if (trace.ev_qualifier == NULL) {
2430. fputs("Not enough memory to parse event qualifier\n",
  2431. trace.output);
  2432. err = -ENOMEM;
  2433. goto out_close;
  2434. }
  2435. err = trace__validate_ev_qualifier(&trace);
  2436. if (err)
  2437. goto out_close;
  2438. }
  2439. err = target__validate(&trace.opts.target);
  2440. if (err) {
  2441. target__strerror(&trace.opts.target, err, bf, sizeof(bf));
  2442. fprintf(trace.output, "%s", bf);
  2443. goto out_close;
  2444. }
  2445. err = target__parse_uid(&trace.opts.target);
  2446. if (err) {
  2447. target__strerror(&trace.opts.target, err, bf, sizeof(bf));
  2448. fprintf(trace.output, "%s", bf);
  2449. goto out_close;
  2450. }
  2451. if (!argc && target__none(&trace.opts.target))
  2452. trace.opts.target.system_wide = true;
  2453. if (input_name)
  2454. err = trace__replay(&trace);
  2455. else
  2456. err = trace__run(&trace, argc, argv);
  2457. out_close:
  2458. if (output_name != NULL)
  2459. fclose(trace.output);
  2460. out:
  2461. return err;
  2462. }