data-convert-bt.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319
  1. /*
  2. * CTF writing support via babeltrace.
  3. *
  4. * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
  5. * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  6. *
  7. * Released under the GPL v2. (and only v2, not any later version)
  8. */
  9. #include <linux/compiler.h>
  10. #include <babeltrace/ctf-writer/writer.h>
  11. #include <babeltrace/ctf-writer/clock.h>
  12. #include <babeltrace/ctf-writer/stream.h>
  13. #include <babeltrace/ctf-writer/event.h>
  14. #include <babeltrace/ctf-writer/event-types.h>
  15. #include <babeltrace/ctf-writer/event-fields.h>
  16. #include <babeltrace/ctf-ir/utils.h>
  17. #include <babeltrace/ctf/events.h>
  18. #include <traceevent/event-parse.h>
  19. #include "asm/bug.h"
  20. #include "data-convert-bt.h"
  21. #include "session.h"
  22. #include "util.h"
  23. #include "debug.h"
  24. #include "tool.h"
  25. #include "evlist.h"
  26. #include "evsel.h"
  27. #include "machine.h"
/*
 * Debug printout helpers, routed through the 'data-convert' debug
 * variable (see 'perf --debug'): pr() at level 1, pr2() at level 2,
 * pr_time2() prefixing the output with a timestamp at level 2.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
/* Per-evsel private data: the CTF event class created for it by add_event(). */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};

/* Fallback stream count when the data file doesn't carry nr_cpus_avail. */
#define MAX_CPUS	4096

/* One CTF output stream per CPU. */
struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			 cpu;	/* CPU whose events this stream carries */
	u32			 count;	/* events appended since the last flush */
};
  42. struct ctf_writer {
  43. /* writer primitives */
  44. struct bt_ctf_writer *writer;
  45. struct ctf_stream **stream;
  46. int stream_cnt;
  47. struct bt_ctf_stream_class *stream_class;
  48. struct bt_ctf_clock *clock;
  49. /* data types */
  50. union {
  51. struct {
  52. struct bt_ctf_field_type *s64;
  53. struct bt_ctf_field_type *u64;
  54. struct bt_ctf_field_type *s32;
  55. struct bt_ctf_field_type *u32;
  56. struct bt_ctf_field_type *string;
  57. struct bt_ctf_field_type *u32_hex;
  58. struct bt_ctf_field_type *u64_hex;
  59. };
  60. struct bt_ctf_field_type *array[6];
  61. } data;
  62. };
/* Top-level conversion state: perf tool callbacks plus the CTF writer. */
struct convert {
	struct perf_tool	tool;		/* perf session callbacks */
	struct ctf_writer	writer;		/* CTF output state */

	u64			events_size;	/* total bytes of converted events */
	u64			events_count;	/* number of converted events */

	/* Ordered events configured queue size. */
	u64			queue_size;
};
  71. static int value_set(struct bt_ctf_field_type *type,
  72. struct bt_ctf_event *event,
  73. const char *name, u64 val)
  74. {
  75. struct bt_ctf_field *field;
  76. bool sign = bt_ctf_field_type_integer_get_signed(type);
  77. int ret;
  78. field = bt_ctf_field_create(type);
  79. if (!field) {
  80. pr_err("failed to create a field %s\n", name);
  81. return -1;
  82. }
  83. if (sign) {
  84. ret = bt_ctf_field_signed_integer_set_value(field, val);
  85. if (ret) {
  86. pr_err("failed to set field value %s\n", name);
  87. goto err;
  88. }
  89. } else {
  90. ret = bt_ctf_field_unsigned_integer_set_value(field, val);
  91. if (ret) {
  92. pr_err("failed to set field value %s\n", name);
  93. goto err;
  94. }
  95. }
  96. ret = bt_ctf_event_set_payload(event, name, field);
  97. if (ret) {
  98. pr_err("failed to set payload %s\n", name);
  99. goto err;
  100. }
  101. pr2(" SET [%s = %" PRIu64 "]\n", name, val);
  102. err:
  103. bt_ctf_field_put(field);
  104. return ret;
  105. }
/*
 * Generate typed value_set_<name>() wrappers: each picks the matching
 * bt_ctf_field_type out of cw->data and forwards to value_set().
 */
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
  121. static struct bt_ctf_field_type*
  122. get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
  123. {
  124. unsigned long flags = field->flags;
  125. if (flags & FIELD_IS_STRING)
  126. return cw->data.string;
  127. if (!(flags & FIELD_IS_SIGNED)) {
  128. /* unsigned long are mostly pointers */
  129. if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
  130. return cw->data.u64_hex;
  131. }
  132. if (flags & FIELD_IS_SIGNED) {
  133. if (field->size == 8)
  134. return cw->data.s64;
  135. else
  136. return cw->data.s32;
  137. }
  138. if (field->size == 8)
  139. return cw->data.u64;
  140. else
  141. return cw->data.u32;
  142. }
  143. static unsigned long long adjust_signedness(unsigned long long value_int, int size)
  144. {
  145. unsigned long long value_mask;
  146. /*
  147. * value_mask = (1 << (size * 8 - 1)) - 1.
  148. * Directly set value_mask for code readers.
  149. */
  150. switch (size) {
  151. case 1:
  152. value_mask = 0x7fULL;
  153. break;
  154. case 2:
  155. value_mask = 0x7fffULL;
  156. break;
  157. case 4:
  158. value_mask = 0x7fffffffULL;
  159. break;
  160. case 8:
  161. /*
  162. * For 64 bit value, return it self. There is no need
  163. * to fill high bit.
  164. */
  165. /* Fall through */
  166. default:
  167. /* BUG! */
  168. return value_int;
  169. }
  170. /* If it is a positive value, don't adjust. */
  171. if ((value_int & (~0ULL - value_mask)) == 0)
  172. return value_int;
  173. /* Fill upper part of value_int with 1 to make it a negative long long. */
  174. return (value_int & value_mask) | ~value_mask;
  175. }
  176. static int add_tracepoint_field_value(struct ctf_writer *cw,
  177. struct bt_ctf_event_class *event_class,
  178. struct bt_ctf_event *event,
  179. struct perf_sample *sample,
  180. struct format_field *fmtf)
  181. {
  182. struct bt_ctf_field_type *type;
  183. struct bt_ctf_field *array_field;
  184. struct bt_ctf_field *field;
  185. const char *name = fmtf->name;
  186. void *data = sample->raw_data;
  187. unsigned long flags = fmtf->flags;
  188. unsigned int n_items;
  189. unsigned int i;
  190. unsigned int offset;
  191. unsigned int len;
  192. int ret;
  193. name = fmtf->alias;
  194. offset = fmtf->offset;
  195. len = fmtf->size;
  196. if (flags & FIELD_IS_STRING)
  197. flags &= ~FIELD_IS_ARRAY;
  198. if (flags & FIELD_IS_DYNAMIC) {
  199. unsigned long long tmp_val;
  200. tmp_val = pevent_read_number(fmtf->event->pevent,
  201. data + offset, len);
  202. offset = tmp_val;
  203. len = offset >> 16;
  204. offset &= 0xffff;
  205. }
  206. if (flags & FIELD_IS_ARRAY) {
  207. type = bt_ctf_event_class_get_field_by_name(
  208. event_class, name);
  209. array_field = bt_ctf_field_create(type);
  210. bt_ctf_field_type_put(type);
  211. if (!array_field) {
  212. pr_err("Failed to create array type %s\n", name);
  213. return -1;
  214. }
  215. len = fmtf->size / fmtf->arraylen;
  216. n_items = fmtf->arraylen;
  217. } else {
  218. n_items = 1;
  219. array_field = NULL;
  220. }
  221. type = get_tracepoint_field_type(cw, fmtf);
  222. for (i = 0; i < n_items; i++) {
  223. if (flags & FIELD_IS_ARRAY)
  224. field = bt_ctf_field_array_get_field(array_field, i);
  225. else
  226. field = bt_ctf_field_create(type);
  227. if (!field) {
  228. pr_err("failed to create a field %s\n", name);
  229. return -1;
  230. }
  231. if (flags & FIELD_IS_STRING)
  232. ret = bt_ctf_field_string_set_value(field,
  233. data + offset + i * len);
  234. else {
  235. unsigned long long value_int;
  236. value_int = pevent_read_number(
  237. fmtf->event->pevent,
  238. data + offset + i * len, len);
  239. if (!(flags & FIELD_IS_SIGNED))
  240. ret = bt_ctf_field_unsigned_integer_set_value(
  241. field, value_int);
  242. else
  243. ret = bt_ctf_field_signed_integer_set_value(
  244. field, adjust_signedness(value_int, len));
  245. }
  246. if (ret) {
  247. pr_err("failed to set file value %s\n", name);
  248. goto err_put_field;
  249. }
  250. if (!(flags & FIELD_IS_ARRAY)) {
  251. ret = bt_ctf_event_set_payload(event, name, field);
  252. if (ret) {
  253. pr_err("failed to set payload %s\n", name);
  254. goto err_put_field;
  255. }
  256. }
  257. bt_ctf_field_put(field);
  258. }
  259. if (flags & FIELD_IS_ARRAY) {
  260. ret = bt_ctf_event_set_payload(event, name, array_field);
  261. if (ret) {
  262. pr_err("Failed add payload array %s\n", name);
  263. return -1;
  264. }
  265. bt_ctf_field_put(array_field);
  266. }
  267. return 0;
  268. err_put_field:
  269. bt_ctf_field_put(field);
  270. return -1;
  271. }
  272. static int add_tracepoint_fields_values(struct ctf_writer *cw,
  273. struct bt_ctf_event_class *event_class,
  274. struct bt_ctf_event *event,
  275. struct format_field *fields,
  276. struct perf_sample *sample)
  277. {
  278. struct format_field *field;
  279. int ret;
  280. for (field = fields; field; field = field->next) {
  281. ret = add_tracepoint_field_value(cw, event_class, event, sample,
  282. field);
  283. if (ret)
  284. return -1;
  285. }
  286. return 0;
  287. }
  288. static int add_tracepoint_values(struct ctf_writer *cw,
  289. struct bt_ctf_event_class *event_class,
  290. struct bt_ctf_event *event,
  291. struct perf_evsel *evsel,
  292. struct perf_sample *sample)
  293. {
  294. struct format_field *common_fields = evsel->tp_format->format.common_fields;
  295. struct format_field *fields = evsel->tp_format->format.fields;
  296. int ret;
  297. ret = add_tracepoint_fields_values(cw, event_class, event,
  298. common_fields, sample);
  299. if (!ret)
  300. ret = add_tracepoint_fields_values(cw, event_class, event,
  301. fields, sample);
  302. return ret;
  303. }
/*
 * Fill the payload of a bpf-output event: 'raw_len' (number of u32
 * words) and 'raw_data' (a sequence of that many u32s copied from
 * sample->raw_data).  The goto ladder releases, in reverse order,
 * exactly the references taken so far.
 */
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
		      struct bt_ctf_event *event,
		      struct perf_sample *sample)
{
	struct bt_ctf_field_type *len_type, *seq_type;
	struct bt_ctf_field *len_field, *seq_field;
	unsigned int raw_size = sample->raw_size;
	unsigned int nr_elements = raw_size / sizeof(u32);
	unsigned int i;
	int ret;

	/* Trailing bytes that don't make a full u32 are dropped. */
	if (nr_elements * sizeof(u32) != raw_size)
		pr_warning("Incorrect raw_size (%u) in bpf output event, skip %lu bytes\n",
			   raw_size, nr_elements * sizeof(u32) - raw_size);

	len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
	len_field = bt_ctf_field_create(len_type);
	if (!len_field) {
		pr_err("failed to create 'raw_len' for bpf output event\n");
		ret = -1;
		goto put_len_type;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
	if (ret) {
		pr_err("failed to set field value for raw_len\n");
		goto put_len_field;
	}
	ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
	if (ret) {
		pr_err("failed to set payload to raw_len\n");
		goto put_len_field;
	}

	seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
	seq_field = bt_ctf_field_create(seq_type);
	if (!seq_field) {
		pr_err("failed to create 'raw_data' for bpf output event\n");
		ret = -1;
		goto put_seq_type;
	}

	/* The sequence length field must be set before its elements. */
	ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
	if (ret) {
		pr_err("failed to set length of 'raw_data'\n");
		goto put_seq_field;
	}

	for (i = 0; i < nr_elements; i++) {
		struct bt_ctf_field *elem_field =
			bt_ctf_field_sequence_get_field(seq_field, i);

		ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
				((u32 *)(sample->raw_data))[i]);

		bt_ctf_field_put(elem_field);
		if (ret) {
			pr_err("failed to set raw_data[%d]\n", i);
			goto put_seq_field;
		}
	}

	ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
	if (ret)
		pr_err("failed to set payload for raw_data\n");

put_seq_field:
	bt_ctf_field_put(seq_field);
put_seq_type:
	bt_ctf_field_type_put(seq_type);
put_len_field:
	bt_ctf_field_put(len_field);
put_len_type:
	bt_ctf_field_type_put(len_type);
	return ret;
}
  371. static int add_generic_values(struct ctf_writer *cw,
  372. struct bt_ctf_event *event,
  373. struct perf_evsel *evsel,
  374. struct perf_sample *sample)
  375. {
  376. u64 type = evsel->attr.sample_type;
  377. int ret;
  378. /*
  379. * missing:
  380. * PERF_SAMPLE_TIME - not needed as we have it in
  381. * ctf event header
  382. * PERF_SAMPLE_READ - TODO
  383. * PERF_SAMPLE_CALLCHAIN - TODO
  384. * PERF_SAMPLE_RAW - tracepoint fields are handled separately
  385. * PERF_SAMPLE_BRANCH_STACK - TODO
  386. * PERF_SAMPLE_REGS_USER - TODO
  387. * PERF_SAMPLE_STACK_USER - TODO
  388. */
  389. if (type & PERF_SAMPLE_IP) {
  390. ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
  391. if (ret)
  392. return -1;
  393. }
  394. if (type & PERF_SAMPLE_TID) {
  395. ret = value_set_s32(cw, event, "perf_tid", sample->tid);
  396. if (ret)
  397. return -1;
  398. ret = value_set_s32(cw, event, "perf_pid", sample->pid);
  399. if (ret)
  400. return -1;
  401. }
  402. if ((type & PERF_SAMPLE_ID) ||
  403. (type & PERF_SAMPLE_IDENTIFIER)) {
  404. ret = value_set_u64(cw, event, "perf_id", sample->id);
  405. if (ret)
  406. return -1;
  407. }
  408. if (type & PERF_SAMPLE_STREAM_ID) {
  409. ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
  410. if (ret)
  411. return -1;
  412. }
  413. if (type & PERF_SAMPLE_PERIOD) {
  414. ret = value_set_u64(cw, event, "perf_period", sample->period);
  415. if (ret)
  416. return -1;
  417. }
  418. if (type & PERF_SAMPLE_WEIGHT) {
  419. ret = value_set_u64(cw, event, "perf_weight", sample->weight);
  420. if (ret)
  421. return -1;
  422. }
  423. if (type & PERF_SAMPLE_DATA_SRC) {
  424. ret = value_set_u64(cw, event, "perf_data_src",
  425. sample->data_src);
  426. if (ret)
  427. return -1;
  428. }
  429. if (type & PERF_SAMPLE_TRANSACTION) {
  430. ret = value_set_u64(cw, event, "perf_transaction",
  431. sample->transaction);
  432. if (ret)
  433. return -1;
  434. }
  435. return 0;
  436. }
  437. static int ctf_stream__flush(struct ctf_stream *cs)
  438. {
  439. int err = 0;
  440. if (cs) {
  441. err = bt_ctf_stream_flush(cs->stream);
  442. if (err)
  443. pr_err("CTF stream %d flush failed\n", cs->cpu);
  444. pr("Flush stream for cpu %d (%u samples)\n",
  445. cs->cpu, cs->count);
  446. cs->count = 0;
  447. }
  448. return err;
  449. }
/*
 * Create the per-cpu CTF stream: allocate the wrapper, create the
 * babeltrace stream from the shared stream class, and stamp the
 * packet context's "cpu_id" field with @cpu.  Returns NULL on any
 * failure after releasing whatever was acquired.
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx   = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream   = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	/* cpu_field holds its own reference; pkt_ctx is no longer needed. */
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu    = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
  495. static void ctf_stream__delete(struct ctf_stream *cs)
  496. {
  497. if (cs) {
  498. bt_ctf_stream_put(cs->stream);
  499. free(cs);
  500. }
  501. }
  502. static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
  503. {
  504. struct ctf_stream *cs = cw->stream[cpu];
  505. if (!cs) {
  506. cs = ctf_stream__create(cw, cpu);
  507. cw->stream[cpu] = cs;
  508. }
  509. return cs;
  510. }
  511. static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
  512. struct perf_evsel *evsel)
  513. {
  514. int cpu = 0;
  515. if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
  516. cpu = sample->cpu;
  517. if (cpu > cw->stream_cnt) {
  518. pr_err("Event was recorded for CPU %d, limit is at %d.\n",
  519. cpu, cw->stream_cnt);
  520. cpu = 0;
  521. }
  522. return cpu;
  523. }
/* Flush a stream after this many appended events. */
#define STREAM_FLUSH_COUNT	100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}
  535. static int process_sample_event(struct perf_tool *tool,
  536. union perf_event *_event,
  537. struct perf_sample *sample,
  538. struct perf_evsel *evsel,
  539. struct machine *machine __maybe_unused)
  540. {
  541. struct convert *c = container_of(tool, struct convert, tool);
  542. struct evsel_priv *priv = evsel->priv;
  543. struct ctf_writer *cw = &c->writer;
  544. struct ctf_stream *cs;
  545. struct bt_ctf_event_class *event_class;
  546. struct bt_ctf_event *event;
  547. int ret;
  548. if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
  549. return 0;
  550. event_class = priv->event_class;
  551. /* update stats */
  552. c->events_count++;
  553. c->events_size += _event->header.size;
  554. pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
  555. event = bt_ctf_event_create(event_class);
  556. if (!event) {
  557. pr_err("Failed to create an CTF event\n");
  558. return -1;
  559. }
  560. bt_ctf_clock_set_time(cw->clock, sample->time);
  561. ret = add_generic_values(cw, event, evsel, sample);
  562. if (ret)
  563. return -1;
  564. if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
  565. ret = add_tracepoint_values(cw, event_class, event,
  566. evsel, sample);
  567. if (ret)
  568. return -1;
  569. }
  570. if (perf_evsel__is_bpf_output(evsel)) {
  571. ret = add_bpf_output_values(event_class, event, sample);
  572. if (ret)
  573. return -1;
  574. }
  575. cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
  576. if (cs) {
  577. if (is_flush_needed(cs))
  578. ctf_stream__flush(cs);
  579. cs->count++;
  580. bt_ctf_stream_append_event(cs->stream, event);
  581. }
  582. bt_ctf_event_put(event);
  583. return cs ? 0 : -1;
  584. }
  585. /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
  586. static char *change_name(char *name, char *orig_name, int dup)
  587. {
  588. char *new_name = NULL;
  589. size_t len;
  590. if (!name)
  591. name = orig_name;
  592. if (dup >= 10)
  593. goto out;
  594. /*
  595. * Add '_' prefix to potential keywork. According to
  596. * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
  597. * futher CTF spec updating may require us to use '$'.
  598. */
  599. if (dup < 0)
  600. len = strlen(name) + sizeof("_");
  601. else
  602. len = strlen(orig_name) + sizeof("_dupl_X");
  603. new_name = malloc(len);
  604. if (!new_name)
  605. goto out;
  606. if (dup < 0)
  607. snprintf(new_name, len, "_%s", name);
  608. else
  609. snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
  610. out:
  611. if (name != orig_name)
  612. free(name);
  613. return new_name;
  614. }
/*
 * Add one tracepoint field's type to the event class under a
 * CTF-valid, unique name.  The chosen name is cached in field->alias
 * so later value insertion uses the same identifier.  Returns the
 * babeltrace add-field result, or -1 on name-fixup failure.
 */
static int event_class_add_field(struct bt_ctf_event_class *event_class,
				 struct bt_ctf_field_type *type,
				 struct format_field *field)
{
	struct bt_ctf_field_type *t = NULL;
	char *name;
	int dup = 1;
	int ret;

	/* alias was already assigned */
	if (field->alias != field->name)
		return bt_ctf_event_class_add_field(event_class, type,
						    (char *)field->alias);

	name = field->name;

	/* If 'name' is a keyword, add prefix. */
	if (bt_ctf_validate_identifier(name))
		name = change_name(name, field->name, -1);

	if (!name) {
		pr_err("Failed to fix invalid identifier.");
		return -1;
	}

	/* Append _dupl_N until the name is unique within the class. */
	while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
		bt_ctf_field_type_put(t);
		name = change_name(name, field->name, dup++);
		if (!name) {
			pr_err("Failed to create dup name for '%s'\n", field->name);
			return -1;
		}
	}

	ret = bt_ctf_event_class_add_field(event_class, type, name);
	if (!ret)
		field->alias = name;

	return ret;
}
  648. static int add_tracepoint_fields_types(struct ctf_writer *cw,
  649. struct format_field *fields,
  650. struct bt_ctf_event_class *event_class)
  651. {
  652. struct format_field *field;
  653. int ret;
  654. for (field = fields; field; field = field->next) {
  655. struct bt_ctf_field_type *type;
  656. unsigned long flags = field->flags;
  657. pr2(" field '%s'\n", field->name);
  658. type = get_tracepoint_field_type(cw, field);
  659. if (!type)
  660. return -1;
  661. /*
  662. * A string is an array of chars. For this we use the string
  663. * type and don't care that it is an array. What we don't
  664. * support is an array of strings.
  665. */
  666. if (flags & FIELD_IS_STRING)
  667. flags &= ~FIELD_IS_ARRAY;
  668. if (flags & FIELD_IS_ARRAY)
  669. type = bt_ctf_field_type_array_create(type, field->arraylen);
  670. ret = event_class_add_field(event_class, type, field);
  671. if (flags & FIELD_IS_ARRAY)
  672. bt_ctf_field_type_put(type);
  673. if (ret) {
  674. pr_err("Failed to add field '%s': %d\n",
  675. field->name, ret);
  676. return -1;
  677. }
  678. }
  679. return 0;
  680. }
  681. static int add_tracepoint_types(struct ctf_writer *cw,
  682. struct perf_evsel *evsel,
  683. struct bt_ctf_event_class *class)
  684. {
  685. struct format_field *common_fields = evsel->tp_format->format.common_fields;
  686. struct format_field *fields = evsel->tp_format->format.fields;
  687. int ret;
  688. ret = add_tracepoint_fields_types(cw, common_fields, class);
  689. if (!ret)
  690. ret = add_tracepoint_fields_types(cw, fields, class);
  691. return ret;
  692. }
  693. static int add_bpf_output_types(struct ctf_writer *cw,
  694. struct bt_ctf_event_class *class)
  695. {
  696. struct bt_ctf_field_type *len_type = cw->data.u32;
  697. struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
  698. struct bt_ctf_field_type *seq_type;
  699. int ret;
  700. ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
  701. if (ret)
  702. return ret;
  703. seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
  704. if (!seq_type)
  705. return -1;
  706. return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
  707. }
/*
 * Declare the generic (non-tracepoint) payload fields on the event
 * class, mirroring what add_generic_values() will later fill in for
 * each sample.  Returns -1 on the first field that fails to add.
 */
static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
			     struct bt_ctf_event_class *event_class)
{
	u64 type = evsel->attr.sample_type;

	/*
	 * missing:
	 *   PERF_SAMPLE_TIME         - not needed as we have it in
	 *                              ctf event header
	 *   PERF_SAMPLE_READ         - TODO
	 *   PERF_SAMPLE_CALLCHAIN    - TODO
	 *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
	 *                              are handled separately
	 *   PERF_SAMPLE_BRANCH_STACK - TODO
	 *   PERF_SAMPLE_REGS_USER    - TODO
	 *   PERF_SAMPLE_STACK_USER   - TODO
	 */

#define ADD_FIELD(cl, t, n)						\
	do {								\
		pr2("  field '%s'\n", n);				\
		if (bt_ctf_event_class_add_field(cl, t, n)) {		\
			pr_err("Failed to add field '%s';\n", n);	\
			return -1;					\
		}							\
	} while (0)

	if (type & PERF_SAMPLE_IP)
		ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

	if (type & PERF_SAMPLE_TID) {
		ADD_FIELD(event_class, cw->data.s32, "perf_tid");
		ADD_FIELD(event_class, cw->data.s32, "perf_pid");
	}

	if ((type & PERF_SAMPLE_ID) ||
	    (type & PERF_SAMPLE_IDENTIFIER))
		ADD_FIELD(event_class, cw->data.u64, "perf_id");

	if (type & PERF_SAMPLE_STREAM_ID)
		ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

	if (type & PERF_SAMPLE_PERIOD)
		ADD_FIELD(event_class, cw->data.u64, "perf_period");

	if (type & PERF_SAMPLE_WEIGHT)
		ADD_FIELD(event_class, cw->data.u64, "perf_weight");

	if (type & PERF_SAMPLE_DATA_SRC)
		ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

	if (type & PERF_SAMPLE_TRANSACTION)
		ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
	return 0;
}
/*
 * Create a CTF event class for @evsel, declare all of its payload
 * fields (generic, tracepoint and/or bpf-output), register it with
 * the stream class and stash it in evsel->priv for the sample
 * callback.  Returns 0 on success, -1 on failure (class released).
 */
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
	struct bt_ctf_event_class *event_class;
	struct evsel_priv *priv;
	const char *name = perf_evsel__name(evsel);
	int ret;

	pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

	event_class = bt_ctf_event_class_create(name);
	if (!event_class)
		return -1;

	ret = add_generic_types(cw, evsel, event_class);
	if (ret)
		goto err;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		ret = add_tracepoint_types(cw, evsel, event_class);
		if (ret)
			goto err;
	}

	if (perf_evsel__is_bpf_output(evsel)) {
		ret = add_bpf_output_types(cw, event_class);
		if (ret)
			goto err;
	}

	ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
	if (ret) {
		pr("Failed to add event class into stream.\n");
		goto err;
	}

	priv = malloc(sizeof(*priv));
	if (!priv)
		goto err;

	/* Ownership of the class ref moves to priv; cleanup_events() puts it. */
	priv->event_class = event_class;
	evsel->priv       = priv;
	return 0;

err:
	bt_ctf_event_class_put(event_class);
	pr_err("Failed to add event '%s'.\n", name);
	return -1;
}
  793. static int setup_events(struct ctf_writer *cw, struct perf_session *session)
  794. {
  795. struct perf_evlist *evlist = session->evlist;
  796. struct perf_evsel *evsel;
  797. int ret;
  798. evlist__for_each(evlist, evsel) {
  799. ret = add_event(cw, evsel);
  800. if (ret)
  801. return ret;
  802. }
  803. return 0;
  804. }
  805. static void cleanup_events(struct perf_session *session)
  806. {
  807. struct perf_evlist *evlist = session->evlist;
  808. struct perf_evsel *evsel;
  809. evlist__for_each(evlist, evsel) {
  810. struct evsel_priv *priv;
  811. priv = evsel->priv;
  812. bt_ctf_event_class_put(priv->event_class);
  813. zfree(&evsel->priv);
  814. }
  815. perf_evlist__delete(evlist);
  816. session->evlist = NULL;
  817. }
  818. static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
  819. {
  820. struct ctf_stream **stream;
  821. struct perf_header *ph = &session->header;
  822. int ncpus;
  823. /*
  824. * Try to get the number of cpus used in the data file,
  825. * if not present fallback to the MAX_CPUS.
  826. */
  827. ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
  828. stream = zalloc(sizeof(*stream) * ncpus);
  829. if (!stream) {
  830. pr_err("Failed to allocate streams.\n");
  831. return -ENOMEM;
  832. }
  833. cw->stream = stream;
  834. cw->stream_cnt = ncpus;
  835. return 0;
  836. }
  837. static void free_streams(struct ctf_writer *cw)
  838. {
  839. int cpu;
  840. for (cpu = 0; cpu < cw->stream_cnt; cpu++)
  841. ctf_stream__delete(cw->stream[cpu]);
  842. free(cw->stream);
  843. }
/*
 * Copy the recorded perf environment (host, kernel release, arch, ...)
 * into the CTF trace's environment section.  Returns -1 on the first
 * field that fails to add.
 */
static int ctf_writer__setup_env(struct ctf_writer *cw,
				 struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)							\
	do {								\
		if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
			return -1;					\
	} while (0)

	ADD("host",    header->env.hostname);
	ADD("sysname", "Linux");
	ADD("release", header->env.os_release);
	ADD("version", header->env.version);
	ADD("machine", header->env.arch);
	ADD("domain", "kernel");
	ADD("tracer_name", "perf");

#undef ADD
	return 0;
}
  864. static int ctf_writer__setup_clock(struct ctf_writer *cw)
  865. {
  866. struct bt_ctf_clock *clock = cw->clock;
  867. bt_ctf_clock_set_description(clock, "perf clock");
  868. #define SET(__n, __v) \
  869. do { \
  870. if (bt_ctf_clock_set_##__n(clock, __v)) \
  871. return -1; \
  872. } while (0)
  873. SET(frequency, 1000000000);
  874. SET(offset_s, 0);
  875. SET(offset, 0);
  876. SET(precision, 10);
  877. SET(is_absolute, 0);
  878. #undef SET
  879. return 0;
  880. }
  881. static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
  882. {
  883. struct bt_ctf_field_type *type;
  884. type = bt_ctf_field_type_integer_create(size);
  885. if (!type)
  886. return NULL;
  887. if (sign &&
  888. bt_ctf_field_type_integer_set_signed(type, 1))
  889. goto err;
  890. if (hex &&
  891. bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
  892. goto err;
  893. #if __BYTE_ORDER == __BIG_ENDIAN
  894. bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
  895. #else
  896. bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
  897. #endif
  898. pr2("Created type: INTEGER %d-bit %ssigned %s\n",
  899. size, sign ? "un" : "", hex ? "hex" : "");
  900. return type;
  901. err:
  902. bt_ctf_field_type_put(type);
  903. return NULL;
  904. }
  905. static void ctf_writer__cleanup_data(struct ctf_writer *cw)
  906. {
  907. unsigned int i;
  908. for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
  909. bt_ctf_field_type_put(cw->data.array[i]);
  910. }
  911. static int ctf_writer__init_data(struct ctf_writer *cw)
  912. {
  913. #define CREATE_INT_TYPE(type, size, sign, hex) \
  914. do { \
  915. (type) = create_int_type(size, sign, hex); \
  916. if (!(type)) \
  917. goto err; \
  918. } while (0)
  919. CREATE_INT_TYPE(cw->data.s64, 64, true, false);
  920. CREATE_INT_TYPE(cw->data.u64, 64, false, false);
  921. CREATE_INT_TYPE(cw->data.s32, 32, true, false);
  922. CREATE_INT_TYPE(cw->data.u32, 32, false, false);
  923. CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
  924. CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
  925. cw->data.string = bt_ctf_field_type_string_create();
  926. if (cw->data.string)
  927. return 0;
  928. err:
  929. ctf_writer__cleanup_data(cw);
  930. pr_err("Failed to create data types.\n");
  931. return -1;
  932. }
/*
 * Release every resource the writer holds: the shared field types,
 * the clock, the per-cpu streams, the stream class and the babeltrace
 * writer itself, then zero the whole structure so stale pointers
 * cannot be reused and a second call is harmless.
 *
 * NOTE(review): being safe on a partially initialized writer depends
 * on the bt_*_put()/free helpers tolerating NULL — presumably they
 * do; confirm against the babeltrace API.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
ctf_writer__cleanup_data(cw);
bt_ctf_clock_put(cw->clock);
free_streams(cw);
bt_ctf_stream_class_put(cw->stream_class);
bt_ctf_writer_put(cw->writer);
/* and NULL all the pointers */
memset(cw, 0, sizeof(*cw));
}
/*
 * Initialize the CTF writer @cw for the output directory @path:
 * create the babeltrace writer, the "perf_clock" clock (configured by
 * ctf_writer__setup_clock()), the "perf_stream" stream class bound to
 * that clock, the shared field types, and a "cpu_id" field in the
 * packet context.  On any failure after the writer exists, everything
 * created so far is torn down via ctf_writer__cleanup(); returns 0 on
 * success, -1 on error.
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
struct bt_ctf_writer *writer;
struct bt_ctf_stream_class *stream_class;
struct bt_ctf_clock *clock;
struct bt_ctf_field_type *pkt_ctx_type;
int ret;
/* CTF writer */
writer = bt_ctf_writer_create(path);
if (!writer)
goto err;
cw->writer = writer;
/* CTF clock */
clock = bt_ctf_clock_create("perf_clock");
if (!clock) {
pr("Failed to create CTF clock.\n");
goto err_cleanup;
}
cw->clock = clock;
if (ctf_writer__setup_clock(cw)) {
pr("Failed to setup CTF clock.\n");
goto err_cleanup;
}
/* CTF stream class */
stream_class = bt_ctf_stream_class_create("perf_stream");
if (!stream_class) {
pr("Failed to create CTF stream class.\n");
goto err_cleanup;
}
cw->stream_class = stream_class;
/* CTF clock stream setup */
if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
pr("Failed to assign CTF clock to stream class.\n");
goto err_cleanup;
}
if (ctf_writer__init_data(cw))
goto err_cleanup;
/* Add cpu_id for packet context */
pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
if (!pkt_ctx_type)
goto err_cleanup;
/* The reference on the packet context type is dropped whether or
 * not the field was added; only the saved return code decides. */
ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
bt_ctf_field_type_put(pkt_ctx_type);
if (ret)
goto err_cleanup;
/* CTF clock writer setup */
if (bt_ctf_writer_add_clock(writer, clock)) {
pr("Failed to assign CTF clock to writer.\n");
goto err_cleanup;
}
return 0;
err_cleanup:
ctf_writer__cleanup(cw);
err:
pr_err("Failed to setup CTF writer.\n");
return -1;
}
  1000. static int ctf_writer__flush_streams(struct ctf_writer *cw)
  1001. {
  1002. int cpu, ret = 0;
  1003. for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
  1004. ret = ctf_stream__flush(cw->stream[cpu]);
  1005. return ret;
  1006. }
  1007. static int convert__config(const char *var, const char *value, void *cb)
  1008. {
  1009. struct convert *c = cb;
  1010. if (!strcmp(var, "convert.queue-size")) {
  1011. c->queue_size = perf_config_u64(var, value);
  1012. return 0;
  1013. }
  1014. return 0;
  1015. }
  1016. int bt_convert__perf2ctf(const char *input, const char *path, bool force)
  1017. {
  1018. struct perf_session *session;
  1019. struct perf_data_file file = {
  1020. .path = input,
  1021. .mode = PERF_DATA_MODE_READ,
  1022. .force = force,
  1023. };
  1024. struct convert c = {
  1025. .tool = {
  1026. .sample = process_sample_event,
  1027. .mmap = perf_event__process_mmap,
  1028. .mmap2 = perf_event__process_mmap2,
  1029. .comm = perf_event__process_comm,
  1030. .exit = perf_event__process_exit,
  1031. .fork = perf_event__process_fork,
  1032. .lost = perf_event__process_lost,
  1033. .tracing_data = perf_event__process_tracing_data,
  1034. .build_id = perf_event__process_build_id,
  1035. .ordered_events = true,
  1036. .ordering_requires_timestamps = true,
  1037. },
  1038. };
  1039. struct ctf_writer *cw = &c.writer;
  1040. int err = -1;
  1041. perf_config(convert__config, &c);
  1042. /* CTF writer */
  1043. if (ctf_writer__init(cw, path))
  1044. return -1;
  1045. /* perf.data session */
  1046. session = perf_session__new(&file, 0, &c.tool);
  1047. if (!session)
  1048. goto free_writer;
  1049. if (c.queue_size) {
  1050. ordered_events__set_alloc_size(&session->ordered_events,
  1051. c.queue_size);
  1052. }
  1053. /* CTF writer env/clock setup */
  1054. if (ctf_writer__setup_env(cw, session))
  1055. goto free_session;
  1056. /* CTF events setup */
  1057. if (setup_events(cw, session))
  1058. goto free_session;
  1059. if (setup_streams(cw, session))
  1060. goto free_session;
  1061. err = perf_session__process_events(session);
  1062. if (!err)
  1063. err = ctf_writer__flush_streams(cw);
  1064. else
  1065. pr_err("Error during conversion.\n");
  1066. fprintf(stderr,
  1067. "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
  1068. file.path, path);
  1069. fprintf(stderr,
  1070. "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
  1071. (double) c.events_size / 1024.0 / 1024.0,
  1072. c.events_count);
  1073. cleanup_events(session);
  1074. perf_session__delete(session);
  1075. ctf_writer__cleanup(cw);
  1076. return err;
  1077. free_session:
  1078. perf_session__delete(session);
  1079. free_writer:
  1080. ctf_writer__cleanup(cw);
  1081. pr_err("Error during conversion setup.\n");
  1082. return err;
  1083. }