/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/util.h"
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"
#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

struct per_pid;
struct power_event;
struct wake_event;

struct timechart {
	struct perf_tool	tool;
	struct per_pid		*all_data;
	struct power_event	*power_events;
	struct wake_event	*wake_events;
	int			proc_num;
	unsigned int		numcpus;
	u64			min_freq,	/* Lowest CPU frequency seen */
				max_freq,	/* Highest CPU frequency seen */
				turbo_frequency,
				first_time, last_time;
	bool			power_only,
				tasks_only,
				with_backtrace,
				topology;
};

struct per_pidcomm;
struct cpu_sample;
/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs separately, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
struct per_pid {
	struct per_pid *next;
	int pid;
	int ppid;
	u64 start_time;
	u64 end_time;
	u64 total_time;
	int display;
	struct per_pidcomm *all;
	struct per_pidcomm *current;
};

struct per_pidcomm {
	struct per_pidcomm *next;
	u64 start_time;
	u64 end_time;
	u64 total_time;
	int Y;
	int display;
	long state;
	u64 state_since;
	char *comm;
	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;
	u64 timestamp;
	unsigned char data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;
	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
	const char *backtrace;
};

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
	const char *backtrace;
};

struct process_filter {
	char *name;
	int pid;
	struct process_filter *next;
};

static struct process_filter *process_filter;
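
/*
 * Look up the per_pid entry for @pid on the singly linked all_data list,
 * creating and prepending a new zeroed entry if none exists yet.
 */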
static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
	struct per_pid *cursor = tchart->all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = tchart->all_data;
	tchart->all_data = cursor;
	return cursor;
}

static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;

	p = find_create_pid(tchart, pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;

	p = find_create_pid(tchart, pid);
	pp = find_create_pid(tchart, ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(tchart, pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
	struct per_pid *p;

	p = find_create_pid(tchart, pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}
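
/*
 * Record one scheduling sample (running/waiting/blocked) for @pid on the
 * current comm, and fold RUNNING time into the per-comm and per-pid totals.
 */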
static void pid_put_sample(struct timechart *tchart, int pid, int type,
			   unsigned int cpu, u64 start, u64 end,
			   const char *backtrace)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(tchart, pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	sample->backtrace = backtrace;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}
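
/*
 * Per-CPU bookkeeping of the C-state (idle) and P-state (frequency)
 * currently in effect, indexed by CPU id, so that the end of a state
 * can be matched with its start time.
 */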
#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	pid_exit(tchart, event->fork.pid, event->fork.time);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = tchart->power_events;

	tchart->power_events = pwr;
}

static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = tchart->power_events;

	if (!pwr->start_time)
		pwr->start_time = tchart->first_time;

	tchart->power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > tchart->max_freq)
		tchart->max_freq = new_freq;

	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
		tchart->min_freq = new_freq;

	if (new_freq == tchart->max_freq - 1000)
		tchart->turbo_frequency = tchart->max_freq;
}

static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
			 int waker, int wakee, u8 flags, const char *backtrace)
{
	struct per_pid *p;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = waker;
	we->backtrace = backtrace;

	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wakee;
	we->next = tchart->wake_events;
	tchart->wake_events = we;
	p = find_create_pid(tchart, we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(tchart, p->pid, p->current->state, cpu,
			       p->current->state_since, timestamp, NULL);

		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}
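
/*
 * On a context switch, close out the state sample of both the outgoing and
 * the incoming task, mark the incoming task RUNNING, and derive the outgoing
 * task's new state (BLOCKED if prev_state has the uninterruptible bit set,
 * WAITING if it is still runnable) from prev_state.
 */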
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
			 int prev_pid, int next_pid, u64 prev_state,
			 const char *backtrace)
{
	struct per_pid *p = NULL, *prev_p;

	prev_p = find_create_pid(tchart, prev_pid);

	p = find_create_pid(tchart, next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
			       prev_p->current->state_since, timestamp,
			       backtrace);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(tchart, next_pid, p->current->state, cpu,
				       p->current->state_since, timestamp,
				       backtrace);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
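
/*
 * Resolve the sample's callchain into a newline-separated list of symbol
 * names, returned as a buffer allocated by open_memstream(). Returns NULL
 * when the stream cannot be created or the callchain looks corrupted.
 */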
static const char *cat_backtrace(union perf_event *event,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	struct addr_location al;
	unsigned int i;
	char *p = NULL;
	size_t p_len;
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location tal;
	struct ip_callchain *chain = sample->callchain;
	FILE *f = open_memstream(&p, &p_len);

	if (!f) {
		perror("open_memstream error");
		return NULL;
	}

	if (!chain)
		goto exit;

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		goto exit;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);

				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				zfree(&p);
				goto exit;
			}
			continue;
		}

		tal.filtered = 0;
		thread__find_addr_location(al.thread, machine, cpumode,
					   MAP__FUNCTION, ip, &tal);

		if (tal.sym)
			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
				tal.sym->name);
		else
			fprintf(f, "..... %016" PRIx64 "\n", ip);
	}

exit:
	fclose(f);
	return p;
}

typedef int (*tracepoint_handler)(struct timechart *tchart,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  const char *backtrace);

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(tchart, evsel, sample,
			 cat_backtrace(event, sample, machine));
	}

	return 0;
}

static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
			struct perf_evsel *evsel,
			struct perf_sample *sample,
			const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	if (state == (u32)PWR_EVENT_EXIT)
		c_state_end(tchart, cpu_id, sample->time);
	else
		c_state_start(cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_cpu_frequency(struct timechart *tchart,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	p_state_change(tchart, cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_sched_wakeup(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
	int waker = perf_evsel__intval(evsel, sample, "common_pid");
	int wakee = perf_evsel__intval(evsel, sample, "pid");

	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
	return 0;
}

static int
process_sample_sched_switch(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");

	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
		     prev_state, backtrace);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
			   struct perf_evsel *evsel,
			   struct perf_sample *sample,
			   const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	c_state_start(cpu_id, sample->time, value);
	return 0;
}

static int
process_sample_power_end(struct timechart *tchart,
			 struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample,
			 const char *backtrace __maybe_unused)
{
	c_state_end(tchart, sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct timechart *tchart,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	p_state_change(tchart, cpu_id, sample->time, value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close it out for each CPU.
 */
static void end_sample_processing(struct timechart *tchart)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = tchart->power_events;

		tchart->power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = tchart->power_events;

		if (!pwr->start_time)
			pwr->start_time = tchart->first_time;
		if (!pwr->state)
			pwr->state = tchart->min_freq;
		tchart->power_events = pwr;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(struct timechart *tchart)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (tchart->all_data) {
		p = tchart->all_data;
		tchart->all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
			    (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}
			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	tchart->all_data = new_list;
}

static void draw_c_p_states(struct timechart *tchart)
{
	struct power_event *pwr;

	pwr = tchart->power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = tchart->power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = tchart->min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}
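
/*
 * Draw one arrow per wakeup event, connecting the chart rows (Y values) of
 * the waking and woken tasks; wakeups raised from hard/soft IRQ context
 * (waker == -1) are drawn as interrupt markers instead.
 */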
static void draw_wakeups(struct timechart *tchart)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = tchart->wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = tchart->all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to, we->backtrace);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to, we->backtrace);
		else
			svg_partial_wakeline(we->time, from, task_from, to,
					     task_to, we->backtrace);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING) {
					svg_process(sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    p->pid,
						    c->comm,
						    sample->backtrace);
				}

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * tchart->numcpus + 2;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}
			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_running(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_BLOCKED)
					svg_blocked(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
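
/*
 * Decide which tasks/comms get their own bar in the chart: everything that
 * accumulated at least @threshold ns of run time, or, when -p filters are
 * in effect, everything that matches a filter. Returns the number of comms
 * that will be shown.
 */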
static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered(tchart);

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		if (p->total_time >= threshold)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (c->total_time >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}
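
/* default display threshold: 10 ms of accumulated run time, in nanoseconds */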
#define TIME_THRESH 10000000

static void write_svg_file(struct timechart *tchart, const char *filename)
{
	u64 i;
	int count;
	int thresh = TIME_THRESH;

	if (tchart->power_only)
		tchart->proc_num = 0;

	/* We'd like to show at least proc_num tasks;
	 * be less picky if we have fewer */
	do {
		count = determine_display_tasks(tchart, thresh);
		thresh /= 10;
	} while (!process_filter && thresh && count < tchart->proc_num);

	if (!tchart->proc_num)
		count = 0;

	open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < tchart->numcpus; i++)
		svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);

	draw_cpu_usage(tchart);
	if (tchart->proc_num)
		draw_process_bars(tchart);
	if (!tchart->tasks_only)
		draw_c_p_states(tchart);
	if (tchart->proc_num)
		draw_wakeups(tchart);

	svg_close();
}
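
/*
 * perf.data header callback: pick up the number of available CPUs and,
 * when --topology is given, the core/thread sibling maps used to sort
 * CPUs in the SVG output.
 */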
static int process_header(struct perf_file_section *section __maybe_unused,
			  struct perf_header *ph,
			  int feat,
			  int fd __maybe_unused,
			  void *data)
{
	struct timechart *tchart = data;

	switch (feat) {
	case HEADER_NRCPUS:
		tchart->numcpus = ph->env.nr_cpus_avail;
		break;

	case HEADER_CPU_TOPOLOGY:
		if (!tchart->topology)
			break;

		if (svg_build_topology_map(ph->env.sibling_cores,
					   ph->env.nr_sibling_cores,
					   ph->env.sibling_threads,
					   ph->env.nr_sibling_threads))
			fprintf(stderr, "problem building topology\n");
		break;

	default:
		break;
	}

	return 0;
}
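
/*
 * Report path: open the perf.data file, attach the tracepoint handlers
 * above, process all events, then lay out and write the SVG chart.
 */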
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
	const struct perf_evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle", process_sample_cpu_idle },
		{ "power:cpu_frequency", process_sample_cpu_frequency },
		{ "sched:sched_wakeup", process_sample_sched_wakeup },
		{ "sched:sched_switch", process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start", process_sample_power_start },
		{ "power:power_end", process_sample_power_end },
		{ "power:power_frequency", process_sample_power_frequency },
#endif
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	struct perf_session *session = perf_session__new(&file, false,
							 &tchart->tool);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	(void)perf_header__process_sections(&session->header,
					    perf_data_file__fd(session->file),
					    tchart,
					    process_header);

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session, &tchart->tool);
	if (ret)
		goto out_delete;

	end_sample_processing(tchart);

	sort_pids(tchart);

	write_svg_file(tchart, output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}
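
/*
 * Record path: build an argv for 'perf record' with the power and/or sched
 * tracepoints (falling back to the old power:power_* events on older
 * kernels), append the user's extra arguments, and hand off to cmd_record().
 */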
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char **p;
	unsigned int record_elems;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const backtrace_args[] = {
		"-g",
	};
	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);

	const char * const power_args[] = {
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
	};
	unsigned int power_args_nr = ARRAY_SIZE(power_args);

	const char * const old_power_args[] = {
#ifdef SUPPORT_OLD_POWER_EVENTS
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
#endif
	};
	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);

	const char * const tasks_args[] = {
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		power_args_nr = 0;
	} else {
		old_power_args_nr = 0;
	}
#endif

	if (tchart->power_only)
		tasks_args_nr = 0;

	if (tchart->tasks_only) {
		power_args_nr = 0;
		old_power_args_nr = 0;
	}

	if (!tchart->with_backtrace)
		backtrace_args_no = 0;

	record_elems = common_args_nr + tasks_args_nr +
		power_args_nr + old_power_args_nr + backtrace_args_no;

	rec_argc = record_elems + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

	for (i = 0; i < backtrace_args_no; i++)
		*p++ = strdup(backtrace_args[i]);

	for (i = 0; i < tasks_args_nr; i++)
		*p++ = strdup(tasks_args[i]);

	for (i = 0; i < power_args_nr; i++)
		*p++ = strdup(power_args[i]);

	for (i = 0; i < old_power_args_nr; i++)
		*p++ = strdup(old_power_args[i]);

	for (j = 0; j < (unsigned int)argc; j++)
		*p++ = argv[j];

	return cmd_record(rec_argc, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static int
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
		int __maybe_unused unset)
{
	unsigned long duration = strtoul(arg, NULL, 0);

	if (svg_highlight || svg_highlight_name)
		return -1;

	if (duration)
		svg_highlight = duration;
	else
		svg_highlight_name = strdup(arg);

	return 0;
}

int cmd_timechart(int argc, const char **argv,
		  const char *prefix __maybe_unused)
{
	struct timechart tchart = {
		.tool = {
			.comm		 = process_comm_event,
			.fork		 = process_fork_event,
			.exit		 = process_exit_event,
			.sample		 = process_sample_event,
			.ordered_samples = true,
		},
		.proc_num = 15,
	};
	const char *output_name = "output.svg";
	const struct option timechart_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
		      "highlight tasks. Pass duration in ns or process name.",
		       parse_highlight),
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
		    "min. number of tasks to print"),
	OPT_BOOLEAN('t', "topology", &tchart.topology,
		    "sort CPUs according to topology"),
	OPT_END()
	};
	const char * const timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};

	const struct option record_options[] = {
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
	OPT_END()
	};
	const char * const record_usage[] = {
		"perf timechart record [<options>]",
		NULL
	};
	argc = parse_options(argc, argv, timechart_options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	if (tchart.power_only && tchart.tasks_only) {
		pr_err("-P and -T options cannot be used at the same time.\n");
		return -1;
	}

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = parse_options(argc, argv, record_options, record_usage,
				     PARSE_OPT_STOP_AT_NON_OPTION);

		if (tchart.power_only && tchart.tasks_only) {
			pr_err("-P and -T options cannot be used at the same time.\n");
			return -1;
		}

		return timechart__record(&tchart, argc, argv);
	} else if (argc)
		usage_with_options(timechart_usage, timechart_options);

	setup_pager();

	return __cmd_timechart(&tchart, output_name);
}