coresight-tmc-etr.c 8.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344
  1. /*
  2. * Copyright(C) 2016 Linaro Limited. All rights reserved.
  3. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published by
  7. * the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program. If not, see <http://www.gnu.org/licenses/>.
  16. */
  17. #include <linux/coresight.h>
  18. #include <linux/dma-mapping.h>
  19. #include "coresight-priv.h"
  20. #include "coresight-tmc.h"
/*
 * tmc_etr_enable_hw - program and start trace capture on the ETR.
 *
 * Zeroes the trace buffer, configures the buffer size, the AXI
 * read/write attributes and the DMA base address, then turns on the
 * formatter and the capture engine.  Both call sites in this file
 * invoke this with drvdata->spinlock held.
 */
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ is programmed in 32-bit words, hence the divide by 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* Rework the AXI attributes, leaving unrelated bits untouched */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	/* Separate read-cache attributes only on parts that support them */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	/* Point the ETR's DMA engine at the trace buffer */
	tmc_write_dba(drvdata, drvdata->paddr);

	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set it properly (i.e, RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	/* Formatter on, flush/trigger behaviour, then enable capture */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_etr_dump_hw - stage the captured trace data for a sysFS read.
 *
 * Uses RWP and the STS "full" flag to work out where valid trace data
 * begins in the DMA buffer and how much there is, recording the answer
 * in drvdata::buf and drvdata::len.
 */
static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		/*
		 * The buffer wrapped: the oldest data starts at RWP, so
		 * the full buffer size is available.  The words at the
		 * wrap point are overwritten with barrier_pkt so that a
		 * decoder picking up mid-stream can resynchronise.
		 *
		 * NOTE(review): nothing here bounds the barrier copy
		 * against vaddr + size; if RWP can land within a few
		 * words of the buffer's end this would write past the
		 * allocation - confirm RWP alignment guarantees.
		 */
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}
	} else {
		/* No wrap: data runs from the buffer base up to RWP */
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}
/*
 * tmc_etr_disable_hw - stop trace capture on the ETR.
 *
 * Flushes and stops the formatter, snapshots the buffer state for a
 * later sysFS read when applicable, then disables the capture engine.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Drain in-flight trace before the TMC is turned off */
	tmc_flush_and_stop(drvdata);

	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * tmc_enable_etr_sink_sysfs - enable the ETR as a sink from sysFS.
 *
 * Allocates the coherent DMA trace buffer on first use (with the
 * spinlock dropped, since the allocation can sleep), then enables the
 * hardware unless a read is in progress or the sink is already running
 * in sysFS mode.  Returns 0 on success, -EBUSY if the buffer is being
 * read, -ENOMEM if allocation fails.
 */
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	/*
	 * NOTE(review): dma_alloc_coherent() returns a CPU pointer, so
	 * the __iomem annotation looks suspect - confirm against the
	 * declaration of drvdata::vaddr.
	 */
	void __iomem *vaddr = NULL;
	dma_addr_t paddr;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::buf == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->buf == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}
  160. static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
  161. {
  162. int ret = 0;
  163. unsigned long flags;
  164. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  165. spin_lock_irqsave(&drvdata->spinlock, flags);
  166. if (drvdata->reading) {
  167. ret = -EINVAL;
  168. goto out;
  169. }
  170. /*
  171. * In Perf mode there can be only one writer per sink. There
  172. * is also no need to continue if the ETR is already operated
  173. * from sysFS.
  174. */
  175. if (drvdata->mode != CS_MODE_DISABLED) {
  176. ret = -EINVAL;
  177. goto out;
  178. }
  179. drvdata->mode = CS_MODE_PERF;
  180. tmc_etr_enable_hw(drvdata);
  181. out:
  182. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  183. return ret;
  184. }
  185. static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
  186. {
  187. switch (mode) {
  188. case CS_MODE_SYSFS:
  189. return tmc_enable_etr_sink_sysfs(csdev);
  190. case CS_MODE_PERF:
  191. return tmc_enable_etr_sink_perf(csdev);
  192. }
  193. /* We shouldn't be here */
  194. return -EINVAL;
  195. }
  196. static void tmc_disable_etr_sink(struct coresight_device *csdev)
  197. {
  198. unsigned long flags;
  199. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  200. spin_lock_irqsave(&drvdata->spinlock, flags);
  201. if (drvdata->reading) {
  202. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  203. return;
  204. }
  205. /* Disable the TMC only if it needs to */
  206. if (drvdata->mode != CS_MODE_DISABLED) {
  207. tmc_etr_disable_hw(drvdata);
  208. drvdata->mode = CS_MODE_DISABLED;
  209. }
  210. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  211. dev_info(drvdata->dev, "TMC-ETR disabled\n");
  212. }
/* Sink callbacks hooked into the coresight core for the ETR */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable = tmc_enable_etr_sink,
	.disable = tmc_disable_etr_sink,
};
/* Exported operation table registered with the coresight framework */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops = &tmc_etr_sink_ops,
};
  220. int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
  221. {
  222. int ret = 0;
  223. unsigned long flags;
  224. /* config types are set a boot time and never change */
  225. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
  226. return -EINVAL;
  227. spin_lock_irqsave(&drvdata->spinlock, flags);
  228. if (drvdata->reading) {
  229. ret = -EBUSY;
  230. goto out;
  231. }
  232. /* Don't interfere if operated from Perf */
  233. if (drvdata->mode == CS_MODE_PERF) {
  234. ret = -EINVAL;
  235. goto out;
  236. }
  237. /* If drvdata::buf is NULL the trace data has been read already */
  238. if (drvdata->buf == NULL) {
  239. ret = -EINVAL;
  240. goto out;
  241. }
  242. /* Disable the TMC if need be */
  243. if (drvdata->mode == CS_MODE_SYSFS)
  244. tmc_etr_disable_hw(drvdata);
  245. drvdata->reading = true;
  246. out:
  247. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  248. return ret;
  249. }
  250. int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
  251. {
  252. unsigned long flags;
  253. dma_addr_t paddr;
  254. void __iomem *vaddr = NULL;
  255. /* config types are set a boot time and never change */
  256. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
  257. return -EINVAL;
  258. spin_lock_irqsave(&drvdata->spinlock, flags);
  259. /* RE-enable the TMC if need be */
  260. if (drvdata->mode == CS_MODE_SYSFS) {
  261. /*
  262. * The trace run will continue with the same allocated trace
  263. * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
  264. * so we don't have to explicitly clear it. Also, since the
  265. * tracer is still enabled drvdata::buf can't be NULL.
  266. */
  267. tmc_etr_enable_hw(drvdata);
  268. } else {
  269. /*
  270. * The ETR is not tracing and the buffer was just read.
  271. * As such prepare to free the trace buffer.
  272. */
  273. vaddr = drvdata->vaddr;
  274. paddr = drvdata->paddr;
  275. drvdata->buf = drvdata->vaddr = NULL;
  276. }
  277. drvdata->reading = false;
  278. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  279. /* Free allocated memory out side of the spinlock */
  280. if (vaddr)
  281. dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
  282. return 0;
  283. }