  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2016 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <linux/circ_buf.h>
  7. #include <linux/coresight.h>
  8. #include <linux/perf_event.h>
  9. #include <linux/slab.h>
  10. #include "coresight-priv.h"
  11. #include "coresight-tmc.h"
/*
 * tmc_etb_enable_hw - program and start the TMC in circular buffer (ETB) mode.
 *
 * Unlocks the CoreSight lane, waits for the TMC to become ready, selects
 * circular buffer mode, configures the formatter/flush control and trigger
 * counter registers, then starts capture.  Called with drvdata->spinlock
 * held by all callers in this file.
 */
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* Capture into the internal trace RAM, wrapping when full */
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Formatter + trigger insertion on, flush/trigger event routing */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Amount of trace to capture after the trigger event */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etb_dump_hw - drain the TMC trace RAM into drvdata->buf.
 *
 * Reads 32-bit words from the RAM Read Data (RRD) register until the
 * device returns the 0xFFFFFFFF end-of-data marker, updating drvdata->len
 * with the number of bytes copied.  If the status register shows the RAM
 * wrapped around (trace lost), the first words of the dump are replaced
 * with barrier packets so decoders can spot the discontinuity.  Called
 * from tmc_etb_disable_hw() inside a CS_UNLOCK/CS_LOCK section.
 */
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	bool lost = false;
	char *bufp;
	const u32 *barrier;
	u32 read_data, status;
	int i;

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL)
		lost = true;

	bufp = drvdata->buf;
	drvdata->len = 0;
	barrier = barrier_pkt;
	while (1) {
		/* Read one burst per memory interface width */
		for (i = 0; i < drvdata->memwidth; i++) {
			read_data = readl_relaxed(drvdata->base + TMC_RRD);
			/* 0xFFFFFFFF from RRD marks the end of the data */
			if (read_data == 0xFFFFFFFF)
				return;
			/*
			 * On a wrap, overlay the start of the capture with
			 * barrier packets (barrier_pkt is 0-terminated).
			 */
			if (lost && *barrier) {
				read_data = *barrier;
				barrier++;
			}
			memcpy(bufp, &read_data, 4);
			bufp += 4;
			drvdata->len += 4;
		}
	}
}
/*
 * tmc_etb_disable_hw - stop capture and, in sysFS mode, dump the RAM content.
 * Called with drvdata->spinlock held by all callers in this file.
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Push remaining trace through the formatter and stop capture */
	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_enable_hw - program and start the TMC as a HW FIFO link (ETF).
 * Called with drvdata->spinlock held (see tmc_enable_etf_link()).
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* Pass trace straight through rather than storing it */
	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Zero the buffer level water mark (TMC_BUFWM) */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_disable_hw - flush pending trace, then stop and disable the FIFO.
 * Called with drvdata->spinlock held (see tmc_disable_etf_link()).
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);
	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	CS_LOCK(drvdata->base);
}
/*
 * tmc_enable_etf_sink_sysfs - enable the ETB/ETF as a sysFS-controlled sink.
 *
 * Lazily allocates the trace buffer (with the spinlock dropped, since the
 * allocation may sleep) and starts the TMC in circular buffer mode.
 *
 * Returns 0 on success (including when the sink was already enabled),
 * -EBUSY if the buffer is currently being read, -ENOMEM on allocation
 * failure.
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/*
		 * Let's try again - another thread may have installed a
		 * buffer or changed the state while the lock was dropped.
		 */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etb_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
  146. static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
  147. {
  148. int ret = 0;
  149. unsigned long flags;
  150. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  151. spin_lock_irqsave(&drvdata->spinlock, flags);
  152. if (drvdata->reading) {
  153. ret = -EINVAL;
  154. goto out;
  155. }
  156. /*
  157. * In Perf mode there can be only one writer per sink. There
  158. * is also no need to continue if the ETB/ETR is already operated
  159. * from sysFS.
  160. */
  161. if (drvdata->mode != CS_MODE_DISABLED) {
  162. ret = -EINVAL;
  163. goto out;
  164. }
  165. drvdata->mode = CS_MODE_PERF;
  166. tmc_etb_enable_hw(drvdata);
  167. out:
  168. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  169. return ret;
  170. }
  171. static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
  172. {
  173. int ret;
  174. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  175. switch (mode) {
  176. case CS_MODE_SYSFS:
  177. ret = tmc_enable_etf_sink_sysfs(csdev);
  178. break;
  179. case CS_MODE_PERF:
  180. ret = tmc_enable_etf_sink_perf(csdev);
  181. break;
  182. /* We shouldn't be here */
  183. default:
  184. ret = -EINVAL;
  185. break;
  186. }
  187. if (ret)
  188. return ret;
  189. dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
  190. return 0;
  191. }
  192. static void tmc_disable_etf_sink(struct coresight_device *csdev)
  193. {
  194. unsigned long flags;
  195. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  196. spin_lock_irqsave(&drvdata->spinlock, flags);
  197. if (drvdata->reading) {
  198. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  199. return;
  200. }
  201. /* Disable the TMC only if it needs to */
  202. if (drvdata->mode != CS_MODE_DISABLED) {
  203. tmc_etb_disable_hw(drvdata);
  204. drvdata->mode = CS_MODE_DISABLED;
  205. }
  206. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  207. dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
  208. }
  209. static int tmc_enable_etf_link(struct coresight_device *csdev,
  210. int inport, int outport)
  211. {
  212. unsigned long flags;
  213. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  214. spin_lock_irqsave(&drvdata->spinlock, flags);
  215. if (drvdata->reading) {
  216. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  217. return -EBUSY;
  218. }
  219. tmc_etf_enable_hw(drvdata);
  220. drvdata->mode = CS_MODE_SYSFS;
  221. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  222. dev_info(drvdata->dev, "TMC-ETF enabled\n");
  223. return 0;
  224. }
  225. static void tmc_disable_etf_link(struct coresight_device *csdev,
  226. int inport, int outport)
  227. {
  228. unsigned long flags;
  229. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  230. spin_lock_irqsave(&drvdata->spinlock, flags);
  231. if (drvdata->reading) {
  232. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  233. return;
  234. }
  235. tmc_etf_disable_hw(drvdata);
  236. drvdata->mode = CS_MODE_DISABLED;
  237. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  238. dev_info(drvdata->dev, "TMC-ETF disabled\n");
  239. }
  240. static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
  241. void **pages, int nr_pages, bool overwrite)
  242. {
  243. int node;
  244. struct cs_buffers *buf;
  245. if (cpu == -1)
  246. cpu = smp_processor_id();
  247. node = cpu_to_node(cpu);
  248. /* Allocate memory structure for interaction with Perf */
  249. buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
  250. if (!buf)
  251. return NULL;
  252. buf->snapshot = overwrite;
  253. buf->nr_pages = nr_pages;
  254. buf->data_pages = pages;
  255. return buf;
  256. }
  257. static void tmc_free_etf_buffer(void *config)
  258. {
  259. struct cs_buffers *buf = config;
  260. kfree(buf);
  261. }
  262. static int tmc_set_etf_buffer(struct coresight_device *csdev,
  263. struct perf_output_handle *handle,
  264. void *sink_config)
  265. {
  266. int ret = 0;
  267. unsigned long head;
  268. struct cs_buffers *buf = sink_config;
  269. /* wrap head around to the amount of space we have */
  270. head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
  271. /* find the page to write to */
  272. buf->cur = head / PAGE_SIZE;
  273. /* and offset within that page */
  274. buf->offset = head % PAGE_SIZE;
  275. local_set(&buf->data_size, 0);
  276. return ret;
  277. }
  278. static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
  279. struct perf_output_handle *handle,
  280. void *sink_config)
  281. {
  282. long size = 0;
  283. struct cs_buffers *buf = sink_config;
  284. if (buf) {
  285. /*
  286. * In snapshot mode ->data_size holds the new address of the
  287. * ring buffer's head. The size itself is the whole address
  288. * range since we want the latest information.
  289. */
  290. if (buf->snapshot)
  291. handle->head = local_xchg(&buf->data_size,
  292. buf->nr_pages << PAGE_SHIFT);
  293. /*
  294. * Tell the tracer PMU how much we got in this run and if
  295. * something went wrong along the way. Nobody else can use
  296. * this cs_buffers instance until we are done. As such
  297. * resetting parameters here and squaring off with the ring
  298. * buffer API in the tracer PMU is fine.
  299. */
  300. size = local_xchg(&buf->data_size, 0);
  301. }
  302. return size;
  303. }
  304. static void tmc_update_etf_buffer(struct coresight_device *csdev,
  305. struct perf_output_handle *handle,
  306. void *sink_config)
  307. {
  308. bool lost = false;
  309. int i, cur;
  310. const u32 *barrier;
  311. u32 *buf_ptr;
  312. u64 read_ptr, write_ptr;
  313. u32 status, to_read;
  314. unsigned long offset;
  315. struct cs_buffers *buf = sink_config;
  316. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  317. if (!buf)
  318. return;
  319. /* This shouldn't happen */
  320. if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
  321. return;
  322. CS_UNLOCK(drvdata->base);
  323. tmc_flush_and_stop(drvdata);
  324. read_ptr = tmc_read_rrp(drvdata);
  325. write_ptr = tmc_read_rwp(drvdata);
  326. /*
  327. * Get a hold of the status register and see if a wrap around
  328. * has occurred. If so adjust things accordingly.
  329. */
  330. status = readl_relaxed(drvdata->base + TMC_STS);
  331. if (status & TMC_STS_FULL) {
  332. lost = true;
  333. to_read = drvdata->size;
  334. } else {
  335. to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
  336. }
  337. /*
  338. * The TMC RAM buffer may be bigger than the space available in the
  339. * perf ring buffer (handle->size). If so advance the RRP so that we
  340. * get the latest trace data.
  341. */
  342. if (to_read > handle->size) {
  343. u32 mask = 0;
  344. /*
  345. * The value written to RRP must be byte-address aligned to
  346. * the width of the trace memory databus _and_ to a frame
  347. * boundary (16 byte), whichever is the biggest. For example,
  348. * for 32-bit, 64-bit and 128-bit wide trace memory, the four
  349. * LSBs must be 0s. For 256-bit wide trace memory, the five
  350. * LSBs must be 0s.
  351. */
  352. switch (drvdata->memwidth) {
  353. case TMC_MEM_INTF_WIDTH_32BITS:
  354. case TMC_MEM_INTF_WIDTH_64BITS:
  355. case TMC_MEM_INTF_WIDTH_128BITS:
  356. mask = GENMASK(31, 5);
  357. break;
  358. case TMC_MEM_INTF_WIDTH_256BITS:
  359. mask = GENMASK(31, 6);
  360. break;
  361. }
  362. /*
  363. * Make sure the new size is aligned in accordance with the
  364. * requirement explained above.
  365. */
  366. to_read = handle->size & mask;
  367. /* Move the RAM read pointer up */
  368. read_ptr = (write_ptr + drvdata->size) - to_read;
  369. /* Make sure we are still within our limits */
  370. if (read_ptr > (drvdata->size - 1))
  371. read_ptr -= drvdata->size;
  372. /* Tell the HW */
  373. tmc_write_rrp(drvdata, read_ptr);
  374. lost = true;
  375. }
  376. if (lost)
  377. perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
  378. cur = buf->cur;
  379. offset = buf->offset;
  380. barrier = barrier_pkt;
  381. /* for every byte to read */
  382. for (i = 0; i < to_read; i += 4) {
  383. buf_ptr = buf->data_pages[cur] + offset;
  384. *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
  385. if (lost && *barrier) {
  386. *buf_ptr = *barrier;
  387. barrier++;
  388. }
  389. offset += 4;
  390. if (offset >= PAGE_SIZE) {
  391. offset = 0;
  392. cur++;
  393. /* wrap around at the end of the buffer */
  394. cur &= buf->nr_pages - 1;
  395. }
  396. }
  397. /*
  398. * In snapshot mode all we have to do is communicate to
  399. * perf_aux_output_end() the address of the current head. In full
  400. * trace mode the same function expects a size to move rb->aux_head
  401. * forward.
  402. */
  403. if (buf->snapshot)
  404. local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
  405. else
  406. local_add(to_read, &buf->data_size);
  407. CS_LOCK(drvdata->base);
  408. }
/* Sink operations shared by the ETB and ETF configurations */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.set_buffer = tmc_set_etf_buffer,
	.reset_buffer = tmc_reset_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

/* Link operations - only the ETF can sit mid-path as a HW FIFO */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

/* An ETB is a sink only */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

/* An ETF can operate either as a sink or as a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};
/*
 * tmc_read_prepare_etb - get the ETB/ETF ready for a buffer read via /dev.
 *
 * Stops the trace capture if it is running from sysFS (which also dumps
 * the RAM content into drvdata->buf) and flags the device as being read.
 *
 * Returns 0 on success, -EBUSY if a read is already in progress, -EINVAL
 * if the TMC is in HW FIFO mode, is operated from Perf, or holds no data.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be - this also dumps the RAM content */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
/*
 * tmc_read_unprepare_etb - release the ETB/ETF after a /dev buffer read.
 *
 * If a sysFS trace session is still active, re-arm the TMC with a zeroed
 * buffer so the run continues without stale data; otherwise take ownership
 * of the (now consumed) trace buffer and free it.
 *
 * Returns 0 on success, -EINVAL if the TMC is in HW FIFO mode.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set a boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}