coresight-tmc-etf.c

/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

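/*
 * Program the TMC as a circular buffer (ETB mode): formatter and trigger
 * insertion enabled, flushes generated on FLUSHIN and on trigger events,
 * trigger counter loaded from drvdata->trigger_cntr, then capture turned on.
 */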
static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

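/*
 * Drain the trace data held in the TMC's RAM into drvdata->buf through the
 * RAM Read Data register, one memory-width worth of 32-bit words per pass,
 * until the TMC returns the "buffer empty" marker (0xFFFFFFFF).
 */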
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        char *bufp;
        u32 read_data;
        int i;

        bufp = drvdata->buf;
        drvdata->len = 0;
        while (1) {
                for (i = 0; i < drvdata->memwidth; i++) {
                        read_data = readl_relaxed(drvdata->base + TMC_RRD);
                        if (read_data == 0xFFFFFFFF)
                                return;
                        memcpy(bufp, &read_data, 4);
                        bufp += 4;
                        drvdata->len += 4;
                }
        }
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

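/*
 * Program the TMC as a hardware FIFO (ETF link mode): formatter and trigger
 * insertion enabled, buffer watermark cleared, then capture turned on so
 * trace data flows through to the next component in the path.
 */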
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

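/*
 * Enable the ETB/ETF as a sink from sysFS.  A contiguous kernel buffer of
 * drvdata->size bytes backs the capture; it is allocated here if needed and
 * later handed to userspace through the /dev/xyz.{etf|etb} interface.
 */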
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocate the memory here while outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read.  If so simply zero-out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        drvdata->mode = CS_MODE_SYSFS;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        if (!ret)
                dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");

        return ret;
}

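/*
 * Enable the ETB/ETF as a sink on behalf of the perf framework.  Unlike
 * sysFS mode only one owner is allowed, so bail out if the sink is being
 * read or is already driven from sysFS.
 */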
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETB/ETF is already operated
         * from sysFS.
         */
        if (drvdata->mode != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        drvdata->mode = CS_MODE_PERF;
        tmc_etb_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
{
        switch (mode) {
        case CS_MODE_SYSFS:
                return tmc_enable_etf_sink_sysfs(csdev);
        case CS_MODE_PERF:
                return tmc_enable_etf_sink_perf(csdev);
        }

        /* We shouldn't be here */
        return -EINVAL;
}

static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        /* Disable the TMC only if it needs to */
        if (drvdata->mode != CS_MODE_DISABLED) {
                tmc_etb_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
        }

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
                               int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        tmc_etf_enable_hw(drvdata);
        drvdata->mode = CS_MODE_SYSFS;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETF enabled\n");
        return 0;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        tmc_etf_disable_hw(drvdata);
        drvdata->mode = CS_MODE_DISABLED;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC disabled\n");
}

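/*
 * Allocate the per-event structure used to track where trace data should
 * land in the perf ring buffer.  The allocation is made on the node of the
 * CPU the event runs on so accesses in the update path stay local.
 */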
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
                                  void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = sink_config;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
                                          struct perf_output_handle *handle,
                                          void *sink_config)
{
        long size = 0;
        struct cs_buffers *buf = sink_config;

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head.  The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->snapshot)
                        handle->head = local_xchg(&buf->data_size,
                                                  buf->nr_pages << PAGE_SHIFT);
                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way.  Nobody else can use
                 * this cs_buffers instance until we are done.  As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                size = local_xchg(&buf->data_size, 0);
        }

        return size;
}

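/*
 * Drain the data held in the TMC's RAM into the pages of the perf ring
 * buffer.  The hardware is flushed and stopped first, the RAM read pointer
 * is advanced if there is more trace data than space in the perf buffer,
 * and the copy proceeds 32 bits at a time from the RAM Read Data register.
 */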
static void tmc_update_etf_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        int i, cur;
        u32 *buf_ptr;
        u32 read_ptr, write_ptr;
        u32 status, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
                return;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
        write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.
         */
        if (to_read > handle->size) {
                u32 mask = 0;

                /*
                 * The value written to RRP must be byte-address aligned to
                 * the width of the trace memory databus _and_ to a frame
                 * boundary (16 byte), whichever is the biggest. For example,
                 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
                 * LSBs must be 0s. For 256-bit wide trace memory, the five
                 * LSBs must be 0s.
                 */
                switch (drvdata->memwidth) {
                case TMC_MEM_INTF_WIDTH_32BITS:
                case TMC_MEM_INTF_WIDTH_64BITS:
                case TMC_MEM_INTF_WIDTH_128BITS:
                        mask = GENMASK(31, 5);
                        break;
                case TMC_MEM_INTF_WIDTH_256BITS:
                        mask = GENMASK(31, 6);
                        break;
                }

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained above.
                 */
                to_read = handle->size & mask;

                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        }

        cur = buf->cur;
        offset = buf->offset;

        /* for every byte to read */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head.  In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        CS_LOCK(drvdata->base);
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .set_buffer     = tmc_set_etf_buffer,
        .reset_buffer   = tmc_reset_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

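/*
 * Get the ETB/ETF ready for a sysFS read of the trace buffer: stop the
 * capture if it is running from sysFS (which dumps the hardware buffer
 * into drvdata->buf) and mark the device as busy reading.
 */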
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                ret = -EINVAL;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

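/*
 * Undo tmc_read_prepare_etb() once userspace has consumed the buffer:
 * either restart the capture with a zeroed buffer if the sink is still
 * enabled from sysFS, or hand the buffer back so it can be freed.
 */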
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EINVAL;
        }

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer.  As such zero-out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                tmc_etb_enable_hw(drvdata);
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock.  There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}