/* coresight-tmc-etf.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2016 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <linux/circ_buf.h>
  7. #include <linux/coresight.h>
  8. #include <linux/perf_event.h>
  9. #include <linux/slab.h>
  10. #include "coresight-priv.h"
  11. #include "coresight-tmc.h"
  12. #include "coresight-etm-perf.h"
  13. static int tmc_set_etf_buffer(struct coresight_device *csdev,
  14. struct perf_output_handle *handle);
/*
 * Program the TMC for circular-buffer (ETB) capture and start it.
 * The CoreSight registers are unlocked only for the duration of the
 * programming sequence; the register write order follows the driver's
 * mode -> FFCR -> trigger -> enable convention.
 */
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Formatter/flush control: enable formatting and trigger insertion */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	/* Trigger counter as configured through sysFS */
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  29. static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
  30. {
  31. int rc = coresight_claim_device(drvdata->base);
  32. if (rc)
  33. return rc;
  34. __tmc_etb_enable_hw(drvdata);
  35. return 0;
  36. }
  37. static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
  38. {
  39. char *bufp;
  40. u32 read_data, lost;
  41. /* Check if the buffer wrapped around. */
  42. lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
  43. bufp = drvdata->buf;
  44. drvdata->len = 0;
  45. while (1) {
  46. read_data = readl_relaxed(drvdata->base + TMC_RRD);
  47. if (read_data == 0xFFFFFFFF)
  48. break;
  49. memcpy(bufp, &read_data, 4);
  50. bufp += 4;
  51. drvdata->len += 4;
  52. }
  53. if (lost)
  54. coresight_insert_barrier_packet(drvdata->buf);
  55. return;
  56. }
/*
 * Stop ETB capture: flush and halt the formatter, harvest the trace
 * data if a sysFS session owns the buffer, then disable the TMC.
 */
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  70. static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
  71. {
  72. coresight_disclaim_device(drvdata);
  73. __tmc_etb_disable_hw(drvdata);
  74. }
/*
 * Program the TMC for hardware-FIFO (ETF link) operation and start it.
 * In this mode trace data flows through the device rather than being
 * captured in its RAM.
 */
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/*
	 * Buffer watermark set to 0 — NOTE(review): presumably this
	 * disables watermark-based back-pressure; confirm in the TMC TRM.
	 */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  87. static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
  88. {
  89. int rc = coresight_claim_device(drvdata->base);
  90. if (rc)
  91. return rc;
  92. __tmc_etf_enable_hw(drvdata);
  93. return 0;
  94. }
/*
 * Stop ETF FIFO operation: flush and halt, disable the TMC, then give
 * up the device claim.  The "unlocked" disclaim variant is used because
 * the CoreSight registers are already unlocked at this point.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	CS_LOCK(drvdata->base);
}
  103. /*
  104. * Return the available trace data in the buffer from @pos, with
  105. * a maximum limit of @len, updating the @bufpp on where to
  106. * find it.
  107. */
  108. ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
  109. loff_t pos, size_t len, char **bufpp)
  110. {
  111. ssize_t actual = len;
  112. /* Adjust the len to available size @pos */
  113. if (pos + actual > drvdata->len)
  114. actual = drvdata->len - pos;
  115. if (actual > 0)
  116. *bufpp = drvdata->buf + pos;
  117. return actual;
  118. }
/*
 * Enable the ETB/ETF as a sysFS-driven sink.  Makes sure a kernel buffer
 * is available for tmc_etb_dump_hw() to fill, then programs the hardware.
 *
 * Returns 0 on success (or if the sink is already running in sysFS mode),
 * -EBUSY while the buffer is being read through the /dev node, -ENOMEM on
 * allocation failure, or the error from tmc_etb_enable_hw().
 */
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocating the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		/* Hand our freshly allocated buffer over to the driver */
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret)
		drvdata->mode = CS_MODE_SYSFS;
	else
		/* Free up the buffer if we failed to enable */
		used = false;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}
  179. static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
  180. {
  181. int ret = 0;
  182. unsigned long flags;
  183. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  184. struct perf_output_handle *handle = data;
  185. spin_lock_irqsave(&drvdata->spinlock, flags);
  186. do {
  187. ret = -EINVAL;
  188. if (drvdata->reading)
  189. break;
  190. /*
  191. * In Perf mode there can be only one writer per sink. There
  192. * is also no need to continue if the ETB/ETF is already
  193. * operated from sysFS.
  194. */
  195. if (drvdata->mode != CS_MODE_DISABLED)
  196. break;
  197. ret = tmc_set_etf_buffer(csdev, handle);
  198. if (ret)
  199. break;
  200. ret = tmc_etb_enable_hw(drvdata);
  201. if (!ret)
  202. drvdata->mode = CS_MODE_PERF;
  203. } while (0);
  204. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  205. return ret;
  206. }
  207. static int tmc_enable_etf_sink(struct coresight_device *csdev,
  208. u32 mode, void *data)
  209. {
  210. int ret;
  211. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  212. switch (mode) {
  213. case CS_MODE_SYSFS:
  214. ret = tmc_enable_etf_sink_sysfs(csdev);
  215. break;
  216. case CS_MODE_PERF:
  217. ret = tmc_enable_etf_sink_perf(csdev, data);
  218. break;
  219. /* We shouldn't be here */
  220. default:
  221. ret = -EINVAL;
  222. break;
  223. }
  224. if (ret)
  225. return ret;
  226. dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
  227. return 0;
  228. }
  229. static void tmc_disable_etf_sink(struct coresight_device *csdev)
  230. {
  231. unsigned long flags;
  232. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  233. spin_lock_irqsave(&drvdata->spinlock, flags);
  234. if (drvdata->reading) {
  235. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  236. return;
  237. }
  238. /* Disable the TMC only if it needs to */
  239. if (drvdata->mode != CS_MODE_DISABLED) {
  240. tmc_etb_disable_hw(drvdata);
  241. drvdata->mode = CS_MODE_DISABLED;
  242. }
  243. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  244. dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
  245. }
  246. static int tmc_enable_etf_link(struct coresight_device *csdev,
  247. int inport, int outport)
  248. {
  249. int ret;
  250. unsigned long flags;
  251. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  252. spin_lock_irqsave(&drvdata->spinlock, flags);
  253. if (drvdata->reading) {
  254. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  255. return -EBUSY;
  256. }
  257. ret = tmc_etf_enable_hw(drvdata);
  258. if (!ret)
  259. drvdata->mode = CS_MODE_SYSFS;
  260. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  261. if (!ret)
  262. dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
  263. return ret;
  264. }
  265. static void tmc_disable_etf_link(struct coresight_device *csdev,
  266. int inport, int outport)
  267. {
  268. unsigned long flags;
  269. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  270. spin_lock_irqsave(&drvdata->spinlock, flags);
  271. if (drvdata->reading) {
  272. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  273. return;
  274. }
  275. tmc_etf_disable_hw(drvdata);
  276. drvdata->mode = CS_MODE_DISABLED;
  277. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  278. dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
  279. }
  280. static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
  281. void **pages, int nr_pages, bool overwrite)
  282. {
  283. int node;
  284. struct cs_buffers *buf;
  285. if (cpu == -1)
  286. cpu = smp_processor_id();
  287. node = cpu_to_node(cpu);
  288. /* Allocate memory structure for interaction with Perf */
  289. buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
  290. if (!buf)
  291. return NULL;
  292. buf->snapshot = overwrite;
  293. buf->nr_pages = nr_pages;
  294. buf->data_pages = pages;
  295. return buf;
  296. }
  297. static void tmc_free_etf_buffer(void *config)
  298. {
  299. struct cs_buffers *buf = config;
  300. kfree(buf);
  301. }
  302. static int tmc_set_etf_buffer(struct coresight_device *csdev,
  303. struct perf_output_handle *handle)
  304. {
  305. int ret = 0;
  306. unsigned long head;
  307. struct cs_buffers *buf = etm_perf_sink_config(handle);
  308. if (!buf)
  309. return -EINVAL;
  310. /* wrap head around to the amount of space we have */
  311. head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
  312. /* find the page to write to */
  313. buf->cur = head / PAGE_SIZE;
  314. /* and offset within that page */
  315. buf->offset = head % PAGE_SIZE;
  316. local_set(&buf->data_size, 0);
  317. return ret;
  318. }
/*
 * tmc_update_etf_buffer - drain the TMC's trace RAM into the perf AUX
 * ring buffer described by @handle/@sink_config.
 *
 * Stops the capture, works out how much data is available (all of it if
 * the internal buffer wrapped), advances the RAM read pointer when more
 * data exists than the AUX buffer can hold, then copies word by word
 * through TMC_RRD.  Returns the number of bytes made available to perf
 * (the full AUX buffer size in snapshot mode), or 0 if there is nothing
 * to do.  Only valid while the sink is in perf mode.
 */
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 4);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 5);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	if (lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = barrier_pkt;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
		/* Overlay a barrier packet at the head of truncated data */
		if (lost && *barrier) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/* In snapshot mode we have to update the head */
	if (buf->snapshot) {
		handle->head = (cur * PAGE_SIZE) + offset;
		to_read = buf->nr_pages << PAGE_SHIFT;
	}
	CS_LOCK(drvdata->base);

	return to_read;
}
/* Sink operations: the ETB/ETF used as a trace capture endpoint */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link operations: the ETF used as a HW FIFO in the middle of a path */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only ever be a sink */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can operate either as a sink or as a link */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
/*
 * tmc_read_prepare_etb - get the sink ready for reading its buffer
 * through the /dev node.  Stops an active sysFS session (dumping the
 * hardware buffer to memory first) and marks the device as busy reading.
 *
 * Returns 0 on success; -EBUSY if a read is already in progress;
 * -EINVAL if the TMC is in HW FIFO mode, driven by perf, or holds no
 * trace data.
 */
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		ret = -EINVAL;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		__tmc_etb_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
/*
 * tmc_read_unprepare_etb - release the sink after a /dev buffer read.
 *
 * Counterpart to tmc_read_prepare_etb().  If a sysFS session is still
 * active the hardware is re-armed with a zeroed buffer; otherwise the
 * trace buffer is handed back to the allocator.  Returns 0 on success,
 * -EINVAL if the TMC is in HW FIFO mode or of an unexpected config type.
 */
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* There is no point in reading a TMC in HW FIFO mode */
	mode = readl_relaxed(drvdata->base + TMC_MODE);
	if (mode != TMC_MODE_CIRCULAR_BUFFER) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EINVAL;
	}

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer.  As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}