coresight-tmc-etf.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594
  1. /*
  2. * Copyright(C) 2016 Linaro Limited. All rights reserved.
  3. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/circ_buf.h>
  18. #include <linux/coresight.h>
  19. #include <linux/perf_event.h>
  20. #include <linux/slab.h>
  21. #include "coresight-priv.h"
  22. #include "coresight-tmc.h"
/*
 * tmc_etb_enable_hw - program the TMC for circular-buffer (ETB) capture.
 *
 * Unlocks the CoreSight lock registers, waits for the TMC to be ready,
 * selects circular-buffer mode, configures the formatter/flush control
 * register, programs the trigger counter and finally enables capture.
 * Callers in this file invoke it with drvdata->spinlock held.
 */
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	/* Formatter on, trigger insertion, flush and trigger-on-trigin. */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  37. static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
  38. {
  39. char *bufp;
  40. u32 read_data;
  41. int i;
  42. bufp = drvdata->buf;
  43. drvdata->len = 0;
  44. while (1) {
  45. for (i = 0; i < drvdata->memwidth; i++) {
  46. read_data = readl_relaxed(drvdata->base + TMC_RRD);
  47. if (read_data == 0xFFFFFFFF)
  48. return;
  49. memcpy(bufp, &read_data, 4);
  50. bufp += 4;
  51. drvdata->len += 4;
  52. }
  53. }
  54. }
/*
 * tmc_etb_disable_hw - flush and stop the TMC, harvesting the buffer first
 * when the sink is driven from sysFS.
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_enable_hw - program the TMC as a hardware FIFO (link mode),
 * passing trace through rather than capturing it.  Used by the ETF link
 * path (see tmc_enable_etf_link()).
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	/* Set the buffer level watermark to zero. */
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etf_disable_hw - stop the TMC after flushing any pending trace data.
 */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  87. static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
  88. {
  89. int ret = 0;
  90. bool used = false;
  91. char *buf = NULL;
  92. unsigned long flags;
  93. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  94. /*
  95. * If we don't have a buffer release the lock and allocate memory.
  96. * Otherwise keep the lock and move along.
  97. */
  98. spin_lock_irqsave(&drvdata->spinlock, flags);
  99. if (!drvdata->buf) {
  100. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  101. /* Allocating the memory here while outside of the spinlock */
  102. buf = kzalloc(drvdata->size, GFP_KERNEL);
  103. if (!buf)
  104. return -ENOMEM;
  105. /* Let's try again */
  106. spin_lock_irqsave(&drvdata->spinlock, flags);
  107. }
  108. if (drvdata->reading) {
  109. ret = -EBUSY;
  110. goto out;
  111. }
  112. /*
  113. * In sysFS mode we can have multiple writers per sink. Since this
  114. * sink is already enabled no memory is needed and the HW need not be
  115. * touched.
  116. */
  117. if (drvdata->mode == CS_MODE_SYSFS)
  118. goto out;
  119. /*
  120. * If drvdata::buf isn't NULL, memory was allocated for a previous
  121. * trace run but wasn't read. If so simply zero-out the memory.
  122. * Otherwise use the memory allocated above.
  123. *
  124. * The memory is freed when users read the buffer using the
  125. * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
  126. * details.
  127. */
  128. if (drvdata->buf) {
  129. memset(drvdata->buf, 0, drvdata->size);
  130. } else {
  131. used = true;
  132. drvdata->buf = buf;
  133. }
  134. drvdata->mode = CS_MODE_SYSFS;
  135. tmc_etb_enable_hw(drvdata);
  136. out:
  137. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  138. /* Free memory outside the spinlock if need be */
  139. if (!used)
  140. kfree(buf);
  141. if (!ret)
  142. dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
  143. return ret;
  144. }
  145. static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
  146. {
  147. int ret = 0;
  148. unsigned long flags;
  149. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  150. spin_lock_irqsave(&drvdata->spinlock, flags);
  151. if (drvdata->reading) {
  152. ret = -EINVAL;
  153. goto out;
  154. }
  155. /*
  156. * In Perf mode there can be only one writer per sink. There
  157. * is also no need to continue if the ETB/ETR is already operated
  158. * from sysFS.
  159. */
  160. if (drvdata->mode != CS_MODE_DISABLED) {
  161. ret = -EINVAL;
  162. goto out;
  163. }
  164. drvdata->mode = CS_MODE_PERF;
  165. tmc_etb_enable_hw(drvdata);
  166. out:
  167. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  168. return ret;
  169. }
  170. static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
  171. {
  172. switch (mode) {
  173. case CS_MODE_SYSFS:
  174. return tmc_enable_etf_sink_sysfs(csdev);
  175. case CS_MODE_PERF:
  176. return tmc_enable_etf_sink_perf(csdev);
  177. }
  178. /* We shouldn't be here */
  179. return -EINVAL;
  180. }
  181. static void tmc_disable_etf_sink(struct coresight_device *csdev)
  182. {
  183. unsigned long flags;
  184. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  185. spin_lock_irqsave(&drvdata->spinlock, flags);
  186. if (drvdata->reading) {
  187. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  188. return;
  189. }
  190. /* Disable the TMC only if it needs to */
  191. if (drvdata->mode != CS_MODE_DISABLED) {
  192. tmc_etb_disable_hw(drvdata);
  193. drvdata->mode = CS_MODE_DISABLED;
  194. }
  195. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  196. dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
  197. }
  198. static int tmc_enable_etf_link(struct coresight_device *csdev,
  199. int inport, int outport)
  200. {
  201. unsigned long flags;
  202. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  203. spin_lock_irqsave(&drvdata->spinlock, flags);
  204. if (drvdata->reading) {
  205. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  206. return -EBUSY;
  207. }
  208. tmc_etf_enable_hw(drvdata);
  209. drvdata->mode = CS_MODE_SYSFS;
  210. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  211. dev_info(drvdata->dev, "TMC-ETF enabled\n");
  212. return 0;
  213. }
  214. static void tmc_disable_etf_link(struct coresight_device *csdev,
  215. int inport, int outport)
  216. {
  217. unsigned long flags;
  218. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  219. spin_lock_irqsave(&drvdata->spinlock, flags);
  220. if (drvdata->reading) {
  221. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  222. return;
  223. }
  224. tmc_etf_disable_hw(drvdata);
  225. drvdata->mode = CS_MODE_DISABLED;
  226. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  227. dev_info(drvdata->dev, "TMC disabled\n");
  228. }
  229. static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
  230. void **pages, int nr_pages, bool overwrite)
  231. {
  232. int node;
  233. struct cs_buffers *buf;
  234. if (cpu == -1)
  235. cpu = smp_processor_id();
  236. node = cpu_to_node(cpu);
  237. /* Allocate memory structure for interaction with Perf */
  238. buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
  239. if (!buf)
  240. return NULL;
  241. buf->snapshot = overwrite;
  242. buf->nr_pages = nr_pages;
  243. buf->data_pages = pages;
  244. return buf;
  245. }
  246. static void tmc_free_etf_buffer(void *config)
  247. {
  248. struct cs_buffers *buf = config;
  249. kfree(buf);
  250. }
  251. static int tmc_set_etf_buffer(struct coresight_device *csdev,
  252. struct perf_output_handle *handle,
  253. void *sink_config)
  254. {
  255. int ret = 0;
  256. unsigned long head;
  257. struct cs_buffers *buf = sink_config;
  258. /* wrap head around to the amount of space we have */
  259. head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
  260. /* find the page to write to */
  261. buf->cur = head / PAGE_SIZE;
  262. /* and offset within that page */
  263. buf->offset = head % PAGE_SIZE;
  264. local_set(&buf->data_size, 0);
  265. return ret;
  266. }
/*
 * tmc_reset_etf_buffer - report the outcome of a trace run to Perf.
 *
 * Returns the number of bytes collected (0 when no sink configuration
 * was provided) and sets *lost when trace data was dropped.  Both the
 * ->lost and ->data_size counters are reset for the next run.
 */
static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config, bool *lost)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head. The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);

		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way. Nobody else can use
		 * this cs_buffers instance until we are done. As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		*lost = !!local_xchg(&buf->lost, 0);
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}
/*
 * tmc_update_etf_buffer - drain the TMC trace RAM into the Perf AUX pages.
 *
 * Only valid while the sink is operated from Perf.  Stops collection,
 * works out how much data the TMC captured (accounting for a RAM wrap),
 * trims/aligns the amount to what fits in the Perf ring buffer, copies
 * the data 32 bits at a time and records the total in buf->data_size.
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.  A full RAM
	 * means older data was overwritten, hence the lost count bump.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest. For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s. For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 *
		 * NOTE(review): GENMASK(31, 5) below clears five LSBs while
		 * the text above asks for four on <=128-bit memory - confirm
		 * which alignment is actually intended.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		/* Everything older than 'to_read' bytes is dropped. */
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* for every byte to read */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head. In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
/* Sink operations shared by the ETB and ETF configurations. */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link operations, only meaningful for the ETF configuration. */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB can only act as a sink. */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act as both a sink and a link. */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
  410. int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
  411. {
  412. enum tmc_mode mode;
  413. int ret = 0;
  414. unsigned long flags;
  415. /* config types are set a boot time and never change */
  416. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
  417. drvdata->config_type != TMC_CONFIG_TYPE_ETF))
  418. return -EINVAL;
  419. spin_lock_irqsave(&drvdata->spinlock, flags);
  420. if (drvdata->reading) {
  421. ret = -EBUSY;
  422. goto out;
  423. }
  424. /* There is no point in reading a TMC in HW FIFO mode */
  425. mode = readl_relaxed(drvdata->base + TMC_MODE);
  426. if (mode != TMC_MODE_CIRCULAR_BUFFER) {
  427. ret = -EINVAL;
  428. goto out;
  429. }
  430. /* Don't interfere if operated from Perf */
  431. if (drvdata->mode == CS_MODE_PERF) {
  432. ret = -EINVAL;
  433. goto out;
  434. }
  435. /* If drvdata::buf is NULL the trace data has been read already */
  436. if (drvdata->buf == NULL) {
  437. ret = -EINVAL;
  438. goto out;
  439. }
  440. /* Disable the TMC if need be */
  441. if (drvdata->mode == CS_MODE_SYSFS)
  442. tmc_etb_disable_hw(drvdata);
  443. drvdata->reading = true;
  444. out:
  445. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  446. return ret;
  447. }
  448. int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
  449. {
  450. char *buf = NULL;
  451. enum tmc_mode mode;
  452. unsigned long flags;
  453. /* config types are set a boot time and never change */
  454. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
  455. drvdata->config_type != TMC_CONFIG_TYPE_ETF))
  456. return -EINVAL;
  457. spin_lock_irqsave(&drvdata->spinlock, flags);
  458. /* There is no point in reading a TMC in HW FIFO mode */
  459. mode = readl_relaxed(drvdata->base + TMC_MODE);
  460. if (mode != TMC_MODE_CIRCULAR_BUFFER) {
  461. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  462. return -EINVAL;
  463. }
  464. /* Re-enable the TMC if need be */
  465. if (drvdata->mode == CS_MODE_SYSFS) {
  466. /*
  467. * The trace run will continue with the same allocated trace
  468. * buffer. As such zero-out the buffer so that we don't end
  469. * up with stale data.
  470. *
  471. * Since the tracer is still enabled drvdata::buf
  472. * can't be NULL.
  473. */
  474. memset(drvdata->buf, 0, drvdata->size);
  475. tmc_etb_enable_hw(drvdata);
  476. } else {
  477. /*
  478. * The ETB/ETF is not tracing and the buffer was just read.
  479. * As such prepare to free the trace buffer.
  480. */
  481. buf = drvdata->buf;
  482. drvdata->buf = NULL;
  483. }
  484. drvdata->reading = false;
  485. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  486. /*
  487. * Free allocated memory outside of the spinlock. There is no need
  488. * to assert the validity of 'buf' since calling kfree(NULL) is safe.
  489. */
  490. kfree(buf);
  491. return 0;
  492. }