coresight-tmc-etr.c

/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
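
/*
 * tmc_etr_enable_hw - configure and enable the ETR for a trace session.
 *
 * The trace buffer is zeroed, the RAM size register is programmed in
 * 32-bit words (hence size / 4), the AXI control register is set up for
 * 16-beat write bursts with scatter-gather mode off and the protection
 * control bits adjusted, then the DMA buffer address, formatter/flush
 * controls and trigger counter are written before the TMC is enabled.
 * CS_UNLOCK()/CS_LOCK() bracket all register accesses.
 */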
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
        u32 axictl;

        /* Zero out the memory to help with debug */
        memset(drvdata->vaddr, 0, drvdata->size);

        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

        axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
        axictl |= TMC_AXICTL_WR_BURST_16;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
        axictl = (axictl &
                  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
                  TMC_AXICTL_PROT_CTL_B1;
        writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

        writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
        writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}
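
/*
 * tmc_etr_dump_hw - sync drvdata::buf and drvdata::len with the hardware.
 *
 * Read the RAM write pointer and the status register to work out where
 * valid trace data starts and how much of it there is: if the buffer has
 * wrapped (TMC_STS_FULL) the data starts at the write pointer and spans
 * the whole buffer, otherwise it starts at the beginning of the buffer
 * and ends at the write pointer.
 */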
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
        u32 rwp, val;

        rwp = readl_relaxed(drvdata->base + TMC_RWP);
        val = readl_relaxed(drvdata->base + TMC_STS);

        /*
         * Adjust the buffer to point to the beginning of the trace data
         * and update the available trace data.
         */
        if (val & TMC_STS_FULL) {
                drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
                drvdata->len = drvdata->size;
        } else {
                drvdata->buf = drvdata->vaddr;
                drvdata->len = rwp - drvdata->paddr;
        }
}
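
/*
 * tmc_etr_disable_hw - flush and stop the ETR.
 *
 * When operated from sysFS the buffer pointers are harvested with
 * tmc_etr_dump_hw() before the TMC is disabled, since the trace data is
 * read out by user space after the fact.
 */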
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etr_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}
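
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR from the sysFS interface.
 *
 * A contiguous DMA buffer is allocated (outside of the spinlock) on the
 * first enable; later enables reuse the buffer left over from a previous
 * session.  Enabling while a read is in progress returns -EBUSY, and
 * enabling a sink that is already in sysFS mode is a no-op since multiple
 * writers are allowed in that mode.
 */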
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        unsigned long flags;
        void __iomem *vaddr = NULL;
        dma_addr_t paddr;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->vaddr) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /*
                 * Contiguous memory can't be allocated while a spinlock is
                 * held.  As such allocate memory here and free it if a buffer
                 * has already been allocated (from a previous session).
                 */
                vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
                                           &paddr, GFP_KERNEL);
                if (!vaddr)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled no memory is needed and the HW need not be
         * touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                goto out;

        /*
         * If drvdata::buf == NULL, use the memory allocated above.
         * Otherwise a buffer still exists from a previous session, so
         * simply use that.
         */
        if (drvdata->buf == NULL) {
                used = true;
                drvdata->vaddr = vaddr;
                drvdata->paddr = paddr;
                drvdata->buf = drvdata->vaddr;
        }

        drvdata->mode = CS_MODE_SYSFS;
        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used && vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        if (!ret)
                dev_info(drvdata->dev, "TMC-ETR enabled\n");

        return ret;
}
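
/*
 * tmc_enable_etr_sink_perf - enable the ETR on behalf of the perf framework.
 *
 * Only one writer is allowed in perf mode, so the ETR must be completely
 * idle (not being read and not claimed by sysFS) before it can be taken
 * over.
 */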
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * In Perf mode there can be only one writer per sink.  There
         * is also no need to continue if the ETR is already operated
         * from sysFS.
         */
        if (drvdata->mode != CS_MODE_DISABLED) {
                ret = -EINVAL;
                goto out;
        }

        drvdata->mode = CS_MODE_PERF;
        tmc_etr_enable_hw(drvdata);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}
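
/*
 * tmc_enable_etr_sink - sink_ops::enable entry point, dispatching on the
 * requested mode (sysFS or perf).
 */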
static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
        switch (mode) {
        case CS_MODE_SYSFS:
                return tmc_enable_etr_sink_sysfs(csdev);
        case CS_MODE_PERF:
                return tmc_enable_etr_sink_perf(csdev);
        }

        /* We shouldn't be here */
        return -EINVAL;
}
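
/*
 * tmc_disable_etr_sink - sink_ops::disable entry point.
 *
 * The hardware is left untouched while user space is reading the buffer;
 * otherwise the ETR is disabled and the mode goes back to CS_MODE_DISABLED.
 */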
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        /* Disable the TMC only if it needs to */
        if (drvdata->mode != CS_MODE_DISABLED) {
                tmc_etr_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
        }

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
        .enable         = tmc_enable_etr_sink,
        .disable        = tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
        .sink_ops       = &tmc_etr_sink_ops,
};
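
/*
 * tmc_read_prepare_etr - get the ETR ready for a buffer read from user space.
 *
 * Fails if a read is already in progress, if the ETR is driven by perf, or
 * if the buffer has already been read and freed.  In sysFS mode the ETR is
 * stopped so that the buffer content stays stable while it is being read.
 */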
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
        int ret = 0;
        unsigned long flags;

        /* Config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etr_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}
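
/*
 * tmc_read_unprepare_etr - release the ETR after user space is done reading.
 *
 * If a sysFS trace session is still active the ETR is simply re-armed with
 * the same buffer; otherwise the buffer is released, with the actual
 * dma_free_coherent() done outside of the spinlock.
 */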
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
        unsigned long flags;
        dma_addr_t paddr;
        void __iomem *vaddr = NULL;

        /* Config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer.  The trace buffer is cleared in tmc_etr_enable_hw(),
                 * so we don't have to explicitly clear it.  Also, since the
                 * tracer is still enabled drvdata::buf can't be NULL.
                 */
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
                 * The ETR is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
                drvdata->buf = drvdata->vaddr = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free allocated memory outside of the spinlock */
        if (vaddr)
                dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

        return 0;
}