/* coresight-tmc-etr.c */
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
/*
 * tmc_etr_enable_hw - program and start the ETR (Embedded Trace Router).
 *
 * Points the TMC at the DMA buffer described by drvdata->paddr/vaddr/size
 * and enables trace capture in circular-buffer mode.  The caller must have
 * allocated the buffer and must hold whatever locking protects drvdata
 * (see tmc_enable_etr_sink_sysfs()/..._perf()).
 *
 * The register sequence below is order-sensitive: the coresight registers
 * are unlocked first (CS_UNLOCK), the TMC must report TMCSReady before it
 * is reprogrammed, and the unit is enabled only after all configuration
 * registers are written.
 */
void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl;

	/* Zero out the memory to help with debug */
	memset(drvdata->vaddr, 0, drvdata->size);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	/* RSZ is expressed in 32-bit words, hence size / 4 */
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	/* AXI control: use 16-beat write bursts ... */
	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl |= TMC_AXICTL_WR_BURST_16;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	/* ... with a contiguous buffer rather than scatter-gather ... */
	axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	/*
	 * ... and PROT_CTL forced to b01 (B1 set, B0 clear).
	 * NOTE(review): presumably this selects the desired AXI protection
	 * (secure/privileged) attributes - confirm against the TMC TRM.
	 */
	axictl = (axictl &
		  ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
		  TMC_AXICTL_PROT_CTL_B1;
	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);

	/* DMA buffer base address; high word is 0, so 32-bit addresses only */
	writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
	writel_relaxed(0x0, drvdata->base + TMC_DBAHI);

	/* Formatter and flush control: formatting, triggers and flushes on */
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  50. static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
  51. {
  52. u32 rwp, val;
  53. rwp = readl_relaxed(drvdata->base + TMC_RWP);
  54. val = readl_relaxed(drvdata->base + TMC_STS);
  55. /* How much memory do we still have */
  56. if (val & BIT(0))
  57. drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
  58. else
  59. drvdata->buf = drvdata->vaddr;
  60. }
/*
 * tmc_etr_disable_hw - flush and stop the ETR.
 *
 * Order matters: the formatter is flushed and stopped first so the buffer
 * contents are complete, the buffer location is captured (sysFS mode only)
 * while the data is still coherent, and only then is the TMC disabled.
 */
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
  74. static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
  75. {
  76. int ret = 0;
  77. bool used = false;
  78. long val;
  79. unsigned long flags;
  80. void __iomem *vaddr = NULL;
  81. dma_addr_t paddr;
  82. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  83. /* This shouldn't be happening */
  84. if (WARN_ON(mode != CS_MODE_SYSFS))
  85. return -EINVAL;
  86. /*
  87. * If we don't have a buffer release the lock and allocate memory.
  88. * Otherwise keep the lock and move along.
  89. */
  90. spin_lock_irqsave(&drvdata->spinlock, flags);
  91. if (!drvdata->vaddr) {
  92. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  93. /*
  94. * Contiguous memory can't be allocated while a spinlock is
  95. * held. As such allocate memory here and free it if a buffer
  96. * has already been allocated (from a previous session).
  97. */
  98. vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
  99. &paddr, GFP_KERNEL);
  100. if (!vaddr)
  101. return -ENOMEM;
  102. /* Let's try again */
  103. spin_lock_irqsave(&drvdata->spinlock, flags);
  104. }
  105. if (drvdata->reading) {
  106. ret = -EBUSY;
  107. goto out;
  108. }
  109. val = local_xchg(&drvdata->mode, mode);
  110. /*
  111. * In sysFS mode we can have multiple writers per sink. Since this
  112. * sink is already enabled no memory is needed and the HW need not be
  113. * touched.
  114. */
  115. if (val == CS_MODE_SYSFS)
  116. goto out;
  117. /*
  118. * If drvdata::buf == NULL, use the memory allocated above.
  119. * Otherwise a buffer still exists from a previous session, so
  120. * simply use that.
  121. */
  122. if (drvdata->buf == NULL) {
  123. used = true;
  124. drvdata->vaddr = vaddr;
  125. drvdata->paddr = paddr;
  126. drvdata->buf = drvdata->vaddr;
  127. }
  128. memset(drvdata->vaddr, 0, drvdata->size);
  129. tmc_etr_enable_hw(drvdata);
  130. out:
  131. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  132. /* Free memory outside the spinlock if need be */
  133. if (!used && vaddr)
  134. dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
  135. if (!ret)
  136. dev_info(drvdata->dev, "TMC-ETR enabled\n");
  137. return ret;
  138. }
  139. static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
  140. {
  141. int ret = 0;
  142. long val;
  143. unsigned long flags;
  144. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  145. /* This shouldn't be happening */
  146. if (WARN_ON(mode != CS_MODE_PERF))
  147. return -EINVAL;
  148. spin_lock_irqsave(&drvdata->spinlock, flags);
  149. if (drvdata->reading) {
  150. ret = -EINVAL;
  151. goto out;
  152. }
  153. val = local_xchg(&drvdata->mode, mode);
  154. /*
  155. * In Perf mode there can be only one writer per sink. There
  156. * is also no need to continue if the ETR is already operated
  157. * from sysFS.
  158. */
  159. if (val != CS_MODE_DISABLED) {
  160. ret = -EINVAL;
  161. goto out;
  162. }
  163. tmc_etr_enable_hw(drvdata);
  164. out:
  165. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  166. return ret;
  167. }
  168. static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
  169. {
  170. switch (mode) {
  171. case CS_MODE_SYSFS:
  172. return tmc_enable_etr_sink_sysfs(csdev, mode);
  173. case CS_MODE_PERF:
  174. return tmc_enable_etr_sink_perf(csdev, mode);
  175. }
  176. /* We shouldn't be here */
  177. return -EINVAL;
  178. }
  179. static void tmc_disable_etr_sink(struct coresight_device *csdev)
  180. {
  181. long val;
  182. unsigned long flags;
  183. struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  184. spin_lock_irqsave(&drvdata->spinlock, flags);
  185. if (drvdata->reading) {
  186. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  187. return;
  188. }
  189. val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
  190. /* Disable the TMC only if it needs to */
  191. if (val != CS_MODE_DISABLED)
  192. tmc_etr_disable_hw(drvdata);
  193. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  194. dev_info(drvdata->dev, "TMC-ETR disabled\n");
  195. }
/* Sink callbacks wiring the ETR into the coresight framework. */
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable = tmc_enable_etr_sink,
	.disable = tmc_disable_etr_sink,
};

/* Exported (non-static) operations table for the ETR configuration. */
const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops = &tmc_etr_sink_ops,
};
  203. int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
  204. {
  205. int ret = 0;
  206. long val;
  207. unsigned long flags;
  208. /* config types are set a boot time and never change */
  209. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
  210. return -EINVAL;
  211. spin_lock_irqsave(&drvdata->spinlock, flags);
  212. if (drvdata->reading) {
  213. ret = -EBUSY;
  214. goto out;
  215. }
  216. val = local_read(&drvdata->mode);
  217. /* Don't interfere if operated from Perf */
  218. if (val == CS_MODE_PERF) {
  219. ret = -EINVAL;
  220. goto out;
  221. }
  222. /* If drvdata::buf is NULL the trace data has been read already */
  223. if (drvdata->buf == NULL) {
  224. ret = -EINVAL;
  225. goto out;
  226. }
  227. /* Disable the TMC if need be */
  228. if (val == CS_MODE_SYSFS)
  229. tmc_etr_disable_hw(drvdata);
  230. drvdata->reading = true;
  231. out:
  232. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  233. return ret;
  234. }
  235. int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
  236. {
  237. unsigned long flags;
  238. dma_addr_t paddr;
  239. void __iomem *vaddr = NULL;
  240. /* config types are set a boot time and never change */
  241. if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
  242. return -EINVAL;
  243. spin_lock_irqsave(&drvdata->spinlock, flags);
  244. /* RE-enable the TMC if need be */
  245. if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
  246. /*
  247. * The trace run will continue with the same allocated trace
  248. * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
  249. * so we don't have to explicitly clear it. Also, since the
  250. * tracer is still enabled drvdata::buf can't be NULL.
  251. */
  252. tmc_etr_enable_hw(drvdata);
  253. } else {
  254. /*
  255. * The ETR is not tracing and the buffer was just read.
  256. * As such prepare to free the trace buffer.
  257. */
  258. vaddr = drvdata->vaddr;
  259. paddr = drvdata->paddr;
  260. drvdata->buf = drvdata->vaddr = NULL;
  261. }
  262. drvdata->reading = false;
  263. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  264. /* Free allocated memory out side of the spinlock */
  265. if (vaddr)
  266. dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
  267. return 0;
  268. }