cs-etm.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2015 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
#include <api/fs/fs.h>
#include <errno.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <stdlib.h>
#include <sys/stat.h>

#include "cs-etm.h"
#include "../../perf.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/thread_map.h"
#include "../../util/cs-etm.h"
  24. #define ENABLE_SINK_MAX 128
  25. #define CS_BUS_DEVICE_PATH "/bus/coresight/devices/"
/*
 * Per-session state for CoreSight ETM AUX trace recording.  The generic
 * auxtrace_record is embedded first so the callbacks below can recover
 * this structure with container_of().
 */
struct cs_etm_recording {
	struct auxtrace_record	itr;		/* generic auxtrace callbacks */
	struct perf_pmu		*cs_etm_pmu;	/* the cs_etm PMU registered by the kernel */
	struct perf_evlist	*evlist;	/* events of the current session */
	bool			snapshot_mode;	/* recording in snapshot mode */
	size_t			snapshot_size;	/* user-requested snapshot size, 0 = default */
};
  33. static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
  34. static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
  35. struct record_opts *opts,
  36. const char *str)
  37. {
  38. struct cs_etm_recording *ptr =
  39. container_of(itr, struct cs_etm_recording, itr);
  40. unsigned long long snapshot_size = 0;
  41. char *endptr;
  42. if (str) {
  43. snapshot_size = strtoull(str, &endptr, 0);
  44. if (*endptr || snapshot_size > SIZE_MAX)
  45. return -1;
  46. }
  47. opts->auxtrace_snapshot_mode = true;
  48. opts->auxtrace_snapshot_size = snapshot_size;
  49. ptr->snapshot_size = snapshot_size;
  50. return 0;
  51. }
/*
 * Validate and complete the record options for a session that contains a
 * CoreSight ETM event: pin the event's sample period, size the AUX trace
 * mmap area for snapshot or full-trace mode, move the ETM event to the
 * front of the evlist and add a tracking (dummy) event for sideband data.
 * Returns 0 on success (or when no ETM event is present), a negative
 * error code otherwise.
 */
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct perf_evlist *evlist,
				    struct record_opts *opts)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct perf_evsel *evsel, *cs_etm_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	/* Unprivileged users get a smaller default/maximum AUX buffer */
	bool privileged = (geteuid() == 0 || perf_event_paranoid() < 0);

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* Locate the ETM event; more than one is a user error. */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			/* AUX event: fixed period of 1, no frequency sampling */
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* no need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size were given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			/* Round up to a power-of-two page count */
			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		/* Unprivileged users are capped at 128KiB of AUX buffer */
		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.
	 */
	if (!cpu_map__empty(cpus))
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

	return 0;
}
  195. static u64 cs_etm_get_config(struct auxtrace_record *itr)
  196. {
  197. u64 config = 0;
  198. struct cs_etm_recording *ptr =
  199. container_of(itr, struct cs_etm_recording, itr);
  200. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  201. struct perf_evlist *evlist = ptr->evlist;
  202. struct perf_evsel *evsel;
  203. evlist__for_each_entry(evlist, evsel) {
  204. if (evsel->attr.type == cs_etm_pmu->type) {
  205. /*
  206. * Variable perf_event_attr::config is assigned to
  207. * ETMv3/PTM. The bit fields have been made to match
  208. * the ETMv3.5 ETRMCR register specification. See the
  209. * PMU_FORMAT_ATTR() declarations in
  210. * drivers/hwtracing/coresight/coresight-perf.c for
  211. * details.
  212. */
  213. config = evsel->attr.config;
  214. break;
  215. }
  216. }
  217. return config;
  218. }
  219. #ifndef BIT
  220. #define BIT(N) (1UL << (N))
  221. #endif
  222. static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
  223. {
  224. u64 config = 0;
  225. u64 config_opts = 0;
  226. /*
  227. * The perf event variable config bits represent both
  228. * the command line options and register programming
  229. * bits in ETMv3/PTM. For ETMv4 we must remap options
  230. * to real bits
  231. */
  232. config_opts = cs_etm_get_config(itr);
  233. if (config_opts & BIT(ETM_OPT_CYCACC))
  234. config |= BIT(ETM4_CFG_BIT_CYCACC);
  235. if (config_opts & BIT(ETM_OPT_TS))
  236. config |= BIT(ETM4_CFG_BIT_TS);
  237. if (config_opts & BIT(ETM_OPT_RETSTK))
  238. config |= BIT(ETM4_CFG_BIT_RETSTK);
  239. return config;
  240. }
  241. static size_t
  242. cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
  243. struct perf_evlist *evlist __maybe_unused)
  244. {
  245. int i;
  246. int etmv3 = 0, etmv4 = 0;
  247. struct cpu_map *event_cpus = evlist->cpus;
  248. struct cpu_map *online_cpus = cpu_map__new(NULL);
  249. /* cpu map is not empty, we have specific CPUs to work with */
  250. if (!cpu_map__empty(event_cpus)) {
  251. for (i = 0; i < cpu__max_cpu(); i++) {
  252. if (!cpu_map__has(event_cpus, i) ||
  253. !cpu_map__has(online_cpus, i))
  254. continue;
  255. if (cs_etm_is_etmv4(itr, i))
  256. etmv4++;
  257. else
  258. etmv3++;
  259. }
  260. } else {
  261. /* get configuration for all CPUs in the system */
  262. for (i = 0; i < cpu__max_cpu(); i++) {
  263. if (!cpu_map__has(online_cpus, i))
  264. continue;
  265. if (cs_etm_is_etmv4(itr, i))
  266. etmv4++;
  267. else
  268. etmv3++;
  269. }
  270. }
  271. cpu_map__put(online_cpus);
  272. return (CS_ETM_HEADER_SIZE +
  273. (etmv4 * CS_ETMV4_PRIV_SIZE) +
  274. (etmv3 * CS_ETMV3_PRIV_SIZE));
  275. }
/*
 * sysfs paths of read-only ETMv3 metadata files, relative to the PMU's
 * per-CPU directory (see cs_etm_get_ro()).  Indexed by the CS_ETM_*
 * metadata slots defined in util/cs-etm.h.
 */
static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

/*
 * Same as above for ETMv4 tracers; TRCIDR0 doubles as the probe file
 * used by cs_etm_is_etmv4() to detect an ETMv4 tracer.
 */
static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};
  287. static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
  288. {
  289. bool ret = false;
  290. char path[PATH_MAX];
  291. int scan;
  292. unsigned int val;
  293. struct cs_etm_recording *ptr =
  294. container_of(itr, struct cs_etm_recording, itr);
  295. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  296. /* Take any of the RO files for ETMv4 and see if it present */
  297. snprintf(path, PATH_MAX, "cpu%d/%s",
  298. cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
  299. scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
  300. /* The file was read successfully, we have a winner */
  301. if (scan == 1)
  302. ret = true;
  303. return ret;
  304. }
  305. static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
  306. {
  307. char pmu_path[PATH_MAX];
  308. int scan;
  309. unsigned int val = 0;
  310. /* Get RO metadata from sysfs */
  311. snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
  312. scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
  313. if (scan != 1)
  314. pr_err("%s: error reading: %s\n", __func__, pmu_path);
  315. return val;
  316. }
/*
 * Fill one per-CPU metadata record in the auxtrace_info private area,
 * starting at u64 slot *offset, then advance *offset past the record.
 * Record layout and length depend on whether @cpu hosts an ETMv4 or an
 * ETMv3/PTM tracer.
 */
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct auxtrace_info_event *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysFS */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
  377. static int cs_etm_info_fill(struct auxtrace_record *itr,
  378. struct perf_session *session,
  379. struct auxtrace_info_event *info,
  380. size_t priv_size)
  381. {
  382. int i;
  383. u32 offset;
  384. u64 nr_cpu, type;
  385. struct cpu_map *cpu_map;
  386. struct cpu_map *event_cpus = session->evlist->cpus;
  387. struct cpu_map *online_cpus = cpu_map__new(NULL);
  388. struct cs_etm_recording *ptr =
  389. container_of(itr, struct cs_etm_recording, itr);
  390. struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
  391. if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
  392. return -EINVAL;
  393. if (!session->evlist->nr_mmaps)
  394. return -EINVAL;
  395. /* If the cpu_map is empty all online CPUs are involved */
  396. if (cpu_map__empty(event_cpus)) {
  397. cpu_map = online_cpus;
  398. } else {
  399. /* Make sure all specified CPUs are online */
  400. for (i = 0; i < cpu_map__nr(event_cpus); i++) {
  401. if (cpu_map__has(event_cpus, i) &&
  402. !cpu_map__has(online_cpus, i))
  403. return -EINVAL;
  404. }
  405. cpu_map = event_cpus;
  406. }
  407. nr_cpu = cpu_map__nr(cpu_map);
  408. /* Get PMU type as dynamically assigned by the core */
  409. type = cs_etm_pmu->type;
  410. /* First fill out the session header */
  411. info->type = PERF_AUXTRACE_CS_ETM;
  412. info->priv[CS_HEADER_VERSION_0] = 0;
  413. info->priv[CS_PMU_TYPE_CPUS] = type << 32;
  414. info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
  415. info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
  416. offset = CS_ETM_SNAPSHOT + 1;
  417. for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
  418. if (cpu_map__has(cpu_map, i))
  419. cs_etm_get_metadata(i, &offset, itr, info);
  420. cpu_map__put(online_cpus);
  421. return 0;
  422. }
  423. static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
  424. int idx, struct auxtrace_mmap *mm,
  425. unsigned char *data __maybe_unused,
  426. u64 *head, u64 *old)
  427. {
  428. pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
  429. __func__, idx, (size_t)*old, (size_t)*head, mm->len);
  430. *old = *head;
  431. *head += mm->len;
  432. return 0;
  433. }
  434. static int cs_etm_snapshot_start(struct auxtrace_record *itr)
  435. {
  436. struct cs_etm_recording *ptr =
  437. container_of(itr, struct cs_etm_recording, itr);
  438. struct perf_evsel *evsel;
  439. evlist__for_each_entry(ptr->evlist, evsel) {
  440. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  441. return perf_evsel__disable(evsel);
  442. }
  443. return -EINVAL;
  444. }
  445. static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
  446. {
  447. struct cs_etm_recording *ptr =
  448. container_of(itr, struct cs_etm_recording, itr);
  449. struct perf_evsel *evsel;
  450. evlist__for_each_entry(ptr->evlist, evsel) {
  451. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  452. return perf_evsel__enable(evsel);
  453. }
  454. return -EINVAL;
  455. }
  456. static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
  457. {
  458. return (((u64) rand() << 0) & 0x00000000FFFFFFFFull) |
  459. (((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
  460. }
  461. static void cs_etm_recording_free(struct auxtrace_record *itr)
  462. {
  463. struct cs_etm_recording *ptr =
  464. container_of(itr, struct cs_etm_recording, itr);
  465. free(ptr);
  466. }
  467. static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
  468. {
  469. struct cs_etm_recording *ptr =
  470. container_of(itr, struct cs_etm_recording, itr);
  471. struct perf_evsel *evsel;
  472. evlist__for_each_entry(ptr->evlist, evsel) {
  473. if (evsel->attr.type == ptr->cs_etm_pmu->type)
  474. return perf_evlist__enable_event_idx(ptr->evlist,
  475. evsel, idx);
  476. }
  477. return -EINVAL;
  478. }
  479. struct auxtrace_record *cs_etm_record_init(int *err)
  480. {
  481. struct perf_pmu *cs_etm_pmu;
  482. struct cs_etm_recording *ptr;
  483. cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
  484. if (!cs_etm_pmu) {
  485. *err = -EINVAL;
  486. goto out;
  487. }
  488. ptr = zalloc(sizeof(struct cs_etm_recording));
  489. if (!ptr) {
  490. *err = -ENOMEM;
  491. goto out;
  492. }
  493. ptr->cs_etm_pmu = cs_etm_pmu;
  494. ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
  495. ptr->itr.recording_options = cs_etm_recording_options;
  496. ptr->itr.info_priv_size = cs_etm_info_priv_size;
  497. ptr->itr.info_fill = cs_etm_info_fill;
  498. ptr->itr.find_snapshot = cs_etm_find_snapshot;
  499. ptr->itr.snapshot_start = cs_etm_snapshot_start;
  500. ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
  501. ptr->itr.reference = cs_etm_reference;
  502. ptr->itr.free = cs_etm_recording_free;
  503. ptr->itr.read_finish = cs_etm_read_finish;
  504. *err = 0;
  505. return &ptr->itr;
  506. out:
  507. return NULL;
  508. }
  509. static FILE *cs_device__open_file(const char *name)
  510. {
  511. struct stat st;
  512. char path[PATH_MAX];
  513. const char *sysfs;
  514. sysfs = sysfs__mountpoint();
  515. if (!sysfs)
  516. return NULL;
  517. snprintf(path, PATH_MAX,
  518. "%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
  519. if (stat(path, &st) < 0)
  520. return NULL;
  521. return fopen(path, "w");
  522. }
  523. static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
  524. {
  525. va_list args;
  526. FILE *file;
  527. int ret = -EINVAL;
  528. va_start(args, fmt);
  529. file = cs_device__open_file(name);
  530. if (file) {
  531. ret = vfprintf(file, fmt, args);
  532. fclose(file);
  533. }
  534. va_end(args);
  535. return ret;
  536. }
  537. int cs_etm_set_drv_config(struct perf_evsel_config_term *term)
  538. {
  539. int ret;
  540. char enable_sink[ENABLE_SINK_MAX];
  541. snprintf(enable_sink, ENABLE_SINK_MAX, "%s/%s",
  542. term->val.drv_cfg, "enable_sink");
  543. ret = cs_device__print_file(enable_sink, "%d", 1);
  544. if (ret < 0)
  545. return ret;
  546. return 0;
  547. }