coresight-etb10.c

/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Embedded Trace Buffer driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <asm/local.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/circ_buf.h>
#include <linux/mm.h>
#include <linux/perf_event.h>

#include "coresight-priv.h"
#define ETB_RAM_DEPTH_REG       0x004
#define ETB_STATUS_REG          0x00c
#define ETB_RAM_READ_DATA_REG   0x010
#define ETB_RAM_READ_POINTER    0x014
#define ETB_RAM_WRITE_POINTER   0x018
#define ETB_TRG                 0x01c
#define ETB_CTL_REG             0x020
#define ETB_RWD_REG             0x024
#define ETB_FFSR                0x300
#define ETB_FFCR                0x304
#define ETB_ITMISCOP0           0xee0
#define ETB_ITTRFLINACK         0xee4
#define ETB_ITTRFLIN            0xee8
#define ETB_ITATBDATA0          0xeeC
#define ETB_ITATBCTR2           0xef0
#define ETB_ITATBCTR1           0xef4
#define ETB_ITATBCTR0           0xef8

/* register description */
/* STS - 0x00C */
#define ETB_STATUS_RAM_FULL     BIT(0)
/* CTL - 0x020 */
#define ETB_CTL_CAPT_EN         BIT(0)
/* FFCR - 0x304 */
#define ETB_FFCR_EN_FTC         BIT(0)
#define ETB_FFCR_FON_MAN        BIT(6)
#define ETB_FFCR_STOP_FI        BIT(12)
#define ETB_FFCR_STOP_TRIGGER   BIT(13)

#define ETB_FFCR_BIT            6
#define ETB_FFSR_BIT            1
#define ETB_FRAME_SIZE_WORDS    4
/**
 * struct etb_drvdata - specifics associated to an ETB component
 * @base:	memory mapped base address for this component.
 * @dev:	the device entity associated to this component.
 * @atclk:	optional clock for the core parts of the ETB.
 * @csdev:	component vitals needed by the framework.
 * @miscdev:	specifics to handle "/dev/xyz.etb" entry.
 * @spinlock:	serialise access to the ETB hardware.
 * @reading:	synchronise user space access to the etb buffer.
 * @mode:	mode in which this ETB is being used (sysFS or perf).
 * @buf:	area of memory where ETB buffer content gets sent.
 * @buffer_depth: size of @buf, in 32-bit words.
 * @trigger_cntr: amount of words to store after a trigger.
 */
struct etb_drvdata {
        void __iomem            *base;
        struct device           *dev;
        struct clk              *atclk;
        struct coresight_device *csdev;
        struct miscdevice       miscdev;
        spinlock_t              spinlock;
        local_t                 reading;
        local_t                 mode;
        u8                      *buf;
        u32                     buffer_depth;
        u32                     trigger_cntr;
};
static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
        u32 depth = 0;

        pm_runtime_get_sync(drvdata->dev);
        /* RO registers don't need locking */
        depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
        pm_runtime_put(drvdata->dev);

        return depth;
}
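
/*
 * etb_enable_hw() programs the ETB for a capture session: the RAM is
 * zeroed word by word through the RAM Write Data register, the read and
 * write pointers are reset, the trigger counter is loaded and the
 * formatter is configured (ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER)
 * before trace capture is enabled in the control register.
 */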
static void etb_enable_hw(struct etb_drvdata *drvdata)
{
        int i;
        u32 depth;

        CS_UNLOCK(drvdata->base);

        depth = drvdata->buffer_depth;
        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* clear entire RAM buffer */
        for (i = 0; i < depth; i++)
                writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);

        /* reset write RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
        /* reset read RAM pointer address */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
        writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
                       drvdata->base + ETB_FFCR);
        /* ETB trace capture enable */
        writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);

        CS_LOCK(drvdata->base);
}
static int etb_enable(struct coresight_device *csdev, u32 mode)
{
        u32 val;
        unsigned long flags;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);

        /*
         * When accessing from Perf, a HW buffer can be handled
         * by a single trace entity. In sysFS mode many tracers
         * can be logging to the same HW buffer.
         */
        if (val == CS_MODE_PERF)
                return -EBUSY;

        /* Nothing to do, the tracer is already enabled. */
        if (val == CS_MODE_SYSFS)
                goto out;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_enable_hw(drvdata);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

out:
        dev_info(drvdata->dev, "ETB enabled\n");
        return 0;
}
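
/*
 * etb_disable_hw() stops a capture session in an orderly manner: the
 * formatter is told to stop once a flush has completed, a manual flush
 * is generated and waited for, then trace capture is disabled and the
 * formatter is given time to stop before the registers are locked again.
 */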
static void etb_disable_hw(struct etb_drvdata *drvdata)
{
        u32 ffcr;

        CS_UNLOCK(drvdata->base);

        ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
        /* stop formatter when a stop has completed */
        ffcr |= ETB_FFCR_STOP_FI;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
        /* manually generate a flush of the system */
        ffcr |= ETB_FFCR_FON_MAN;
        writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

        if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
                dev_err(drvdata->dev,
                        "timeout while waiting for completion of Manual Flush\n");
        }

        /* disable trace capture */
        writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

        if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
                dev_err(drvdata->dev,
                        "timeout while waiting for Formatter to Stop\n");
        }

        CS_LOCK(drvdata->base);
}
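
/*
 * etb_dump_hw() copies the content of the ETB RAM into drvdata->buf.
 * The read pointer is positioned depending on whether the RAM has
 * wrapped (ETB_STATUS_RAM_FULL) and the whole depth is then read out
 * through the RAM Read Data register. When trace data was lost, barrier
 * packets are substituted at the start of the dump so that decoders can
 * resynchronise; a partially written formatter frame at the end is
 * padded with zeroes.
 */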
static void etb_dump_hw(struct etb_drvdata *drvdata)
{
        bool lost = false;
        int i;
        u8 *buf_ptr;
        const u32 *barrier;
        u32 read_data, depth;
        u32 read_ptr, write_ptr;
        u32 frame_off, frame_endoff;

        CS_UNLOCK(drvdata->base);

        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
        frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
        if (frame_off) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);
                dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
                        (unsigned long)frame_off, (unsigned long)frame_endoff);
                write_ptr += frame_endoff;
        }

        if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
                      & ETB_STATUS_RAM_FULL) == 0) {
                writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        } else {
                writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
                lost = true;
        }

        depth = drvdata->buffer_depth;
        buf_ptr = drvdata->buf;
        barrier = barrier_pkt;
        for (i = 0; i < depth; i++) {
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                if (lost && *barrier) {
                        read_data = *barrier;
                        barrier++;
                }

                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;
        }

        if (frame_off) {
                buf_ptr -= (frame_endoff * 4);
                for (i = 0; i < frame_endoff; i++) {
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                        *buf_ptr++ = 0x0;
                }
        }

        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        CS_LOCK(drvdata->base);
}
static void etb_disable(struct coresight_device *csdev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        etb_disable_hw(drvdata);
        etb_dump_hw(drvdata);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        local_set(&drvdata->mode, CS_MODE_DISABLED);

        dev_info(drvdata->dev, "ETB disabled\n");
}
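
/*
 * The functions below implement the perf AUX buffer handling for this
 * sink. etb_alloc_buffer() allocates a struct cs_buffers on the node of
 * the CPU being traced; the trace pages themselves belong to the perf
 * ring buffer and are only referenced from here.
 */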
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
                              void **pages, int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        if (cpu == -1)
                cpu = smp_processor_id();
        node = cpu_to_node(cpu);

        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void etb_free_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}
static int etb_set_buffer(struct coresight_device *csdev,
                          struct perf_output_handle *handle,
                          void *sink_config)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = sink_config;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

static unsigned long etb_reset_buffer(struct coresight_device *csdev,
                                      struct perf_output_handle *handle,
                                      void *sink_config)
{
        unsigned long size = 0;
        struct cs_buffers *buf = sink_config;

        if (buf) {
                /*
                 * In snapshot mode ->data_size holds the new address of the
                 * ring buffer's head. The size itself is the whole address
                 * range since we want the latest information.
                 */
                if (buf->snapshot)
                        handle->head = local_xchg(&buf->data_size,
                                                  buf->nr_pages << PAGE_SHIFT);

                /*
                 * Tell the tracer PMU how much we got in this run and if
                 * something went wrong along the way. Nobody else can use
                 * this cs_buffers instance until we are done. As such
                 * resetting parameters here and squaring off with the ring
                 * buffer API in the tracer PMU is fine.
                 */
                size = local_xchg(&buf->data_size, 0);
        }

        return size;
}
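
/*
 * etb_update_buffer() drains the ETB RAM into the perf ring buffer when a
 * session stops: the hardware is halted, the read/write pointers are
 * inspected to work out how much data is available (and whether the RAM
 * wrapped), the amount is clamped to what the ring buffer can take when
 * not in snapshot mode, and the words are copied page by page before the
 * ETB is re-armed for the next run.
 */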
static void etb_update_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle,
                              void *sink_config)
{
        bool lost = false;
        int i, cur;
        u8 *buf_ptr;
        const u32 *barrier;
        u32 read_ptr, write_ptr, capacity;
        u32 status, read_data, to_read;
        unsigned long offset;
        struct cs_buffers *buf = sink_config;
        struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return;

        capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;

        etb_disable_hw(drvdata);
        CS_UNLOCK(drvdata->base);

        /* unit is in words, not bytes */
        read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
        write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * Entries should be aligned to the frame size. If they are not
         * go back to the last alignment point to give decoding tools a
         * chance to fix things.
         */
        if (write_ptr % ETB_FRAME_SIZE_WORDS) {
                dev_err(drvdata->dev,
                        "write_ptr: %lu not aligned to formatter frame size\n",
                        (unsigned long)write_ptr);

                write_ptr &= ~(ETB_FRAME_SIZE_WORDS - 1);
                lost = true;
        }

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred. If so adjust things accordingly. Otherwise
         * start at the beginning and go until the write pointer has
         * been reached.
         */
        status = readl_relaxed(drvdata->base + ETB_STATUS_REG);
        if (status & ETB_STATUS_RAM_FULL) {
                lost = true;
                to_read = capacity;
                read_ptr = write_ptr;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->buffer_depth);
                to_read *= ETB_FRAME_SIZE_WORDS;
        }

        /*
         * Make sure we don't overwrite data that hasn't been consumed yet.
         * It is entirely possible that the HW buffer has more data than the
         * ring buffer can currently handle. If so adjust the start address
         * to take only the last traces.
         *
         * In snapshot mode we are looking to get the latest traces only and as
         * such, we don't care about not overwriting data that hasn't been
         * processed by user space.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);

                /* The new read pointer must be frame size aligned */
                to_read = handle->size & mask;
                /*
                 * Move the RAM read pointer up, keeping in mind that
                 * everything is in frame size units.
                 */
                read_ptr = (write_ptr + drvdata->buffer_depth) -
                           to_read / ETB_FRAME_SIZE_WORDS;
                /* Wrap around if need be */
                if (read_ptr > (drvdata->buffer_depth - 1))
                        read_ptr -= drvdata->buffer_depth;
                /* let the decoder know we've skipped ahead */
                lost = true;
        }

        if (lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        /* finally tell HW where we want to start reading from */
        writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);

        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                read_data = readl_relaxed(drvdata->base +
                                          ETB_RAM_READ_DATA_REG);
                if (lost && *barrier) {
                        read_data = *barrier;
                        barrier++;
                }

                *(u32 *)buf_ptr = read_data;
                buf_ptr += 4;
                offset += 4;

                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /* reset ETB buffer for next run */
        writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
        writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);

        /*
         * In snapshot mode all we have to do is communicate to
         * perf_aux_output_end() the address of the current head. In full
         * trace mode the same function expects a size to move rb->aux_head
         * forward.
         */
        if (buf->snapshot)
                local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
        else
                local_add(to_read, &buf->data_size);

        etb_enable_hw(drvdata);
        CS_LOCK(drvdata->base);
}
static const struct coresight_ops_sink etb_sink_ops = {
        .enable         = etb_enable,
        .disable        = etb_disable,
        .alloc_buffer   = etb_alloc_buffer,
        .free_buffer    = etb_free_buffer,
        .set_buffer     = etb_set_buffer,
        .reset_buffer   = etb_reset_buffer,
        .update_buffer  = etb_update_buffer,
};

static const struct coresight_ops etb_cs_ops = {
        .sink_ops       = &etb_sink_ops,
};
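
/*
 * etb_dump() services the sysFS/misc-device read path: when the ETB is
 * running in sysFS mode it is briefly stopped, its RAM is copied to
 * drvdata->buf via etb_dump_hw() and trace capture is restarted.
 */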
static void etb_dump(struct etb_drvdata *drvdata)
{
        unsigned long flags;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                etb_disable_hw(drvdata);
                etb_dump_hw(drvdata);
                etb_enable_hw(drvdata);
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_info(drvdata->dev, "ETB dumped\n");
}

static int etb_open(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        if (local_cmpxchg(&drvdata->reading, 0, 1))
                return -EBUSY;

        dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
        return 0;
}
static ssize_t etb_read(struct file *file, char __user *data,
                        size_t len, loff_t *ppos)
{
        u32 depth;
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);

        etb_dump(drvdata);

        depth = drvdata->buffer_depth;
        if (*ppos + len > depth * 4)
                len = depth * 4 - *ppos;

        if (copy_to_user(data, drvdata->buf + *ppos, len)) {
                dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += len;

        dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
                __func__, len, (int)(depth * 4 - *ppos));
        return len;
}

static int etb_release(struct inode *inode, struct file *file)
{
        struct etb_drvdata *drvdata = container_of(file->private_data,
                                                   struct etb_drvdata, miscdev);
        local_set(&drvdata->reading, 0);

        dev_dbg(drvdata->dev, "%s: released\n", __func__);
        return 0;
}

static const struct file_operations etb_fops = {
        .owner          = THIS_MODULE,
        .open           = etb_open,
        .read           = etb_read,
        .release        = etb_release,
        .llseek         = no_llseek,
};
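
/*
 * Typical sysFS usage (device names depend on the platform; the paths
 * below are examples only): enable this sink and a trace source through
 * the CoreSight framework, then read the captured trace through the misc
 * device registered above, e.g.
 *
 *   echo 1 > /sys/bus/coresight/devices/<etb-name>/enable_sink
 *   echo 1 > /sys/bus/coresight/devices/<source-name>/enable_source
 *   dd if=/dev/<etb-name> of=trace.bin
 *
 * The misc device is named after pdata->name, so it usually appears as
 * /dev/<node>.etb.
 */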
#define coresight_etb10_reg(name, offset)               \
        coresight_simple_reg32(struct etb_drvdata, name, offset)

coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG);
coresight_etb10_reg(sts, ETB_STATUS_REG);
coresight_etb10_reg(rrp, ETB_RAM_READ_POINTER);
coresight_etb10_reg(rwp, ETB_RAM_WRITE_POINTER);
coresight_etb10_reg(trg, ETB_TRG);
coresight_etb10_reg(ctl, ETB_CTL_REG);
coresight_etb10_reg(ffsr, ETB_FFSR);
coresight_etb10_reg(ffcr, ETB_FFCR);

static struct attribute *coresight_etb_mgmt_attrs[] = {
        &dev_attr_rdp.attr,
        &dev_attr_sts.attr,
        &dev_attr_rrp.attr,
        &dev_attr_rwp.attr,
        &dev_attr_trg.attr,
        &dev_attr_ctl.attr,
        &dev_attr_ffsr.attr,
        &dev_attr_ffcr.attr,
        NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static struct attribute *coresight_etb_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        NULL,
};

static const struct attribute_group coresight_etb_group = {
        .attrs = coresight_etb_attrs,
};

static const struct attribute_group coresight_etb_mgmt_group = {
        .attrs = coresight_etb_mgmt_attrs,
        .name = "mgmt",
};

const struct attribute_group *coresight_etb_groups[] = {
        &coresight_etb_group,
        &coresight_etb_mgmt_group,
        NULL,
};
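
/*
 * etb_probe() wires the component up: the DT platform data is parsed, the
 * registers are mapped, the buffer depth is read from the hardware and a
 * matching buffer is allocated, the device is registered with the
 * CoreSight framework and a misc device is created so user space can
 * retrieve the trace buffer.
 */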
static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etb_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc desc = { 0 };
        struct device_node *np = adev->dev.of_node;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
                adev->dev.platform_data = pdata;
        }

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->dev = &adev->dev;
        drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
        if (!IS_ERR(drvdata->atclk)) {
                ret = clk_prepare_enable(drvdata->atclk);
                if (ret)
                        return ret;
        }
        dev_set_drvdata(dev, drvdata);

        /* validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
        pm_runtime_put(&adev->dev);

        if (drvdata->buffer_depth & 0x80000000)
                return -EINVAL;

        drvdata->buf = devm_kzalloc(dev,
                                    drvdata->buffer_depth * 4, GFP_KERNEL);
        if (!drvdata->buf)
                return -ENOMEM;

        desc.type = CORESIGHT_DEV_TYPE_SINK;
        desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
        desc.ops = &etb_cs_ops;
        desc.pdata = pdata;
        desc.dev = dev;
        desc.groups = coresight_etb_groups;
        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev))
                return PTR_ERR(drvdata->csdev);

        drvdata->miscdev.name = pdata->name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &etb_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret)
                goto err_misc_register;

        return 0;

err_misc_register:
        coresight_unregister(drvdata->csdev);
        return ret;
}
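
/*
 * The runtime PM callbacks only gate the optional ATB clock obtained in
 * etb_probe(); when "atclk" is absent they are effectively no-ops.
 */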
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_disable_unprepare(drvdata->atclk);

        return 0;
}

static int etb_runtime_resume(struct device *dev)
{
        struct etb_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR(drvdata->atclk))
                clk_prepare_enable(drvdata->atclk);

        return 0;
}
#endif

static const struct dev_pm_ops etb_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(etb_runtime_suspend, etb_runtime_resume, NULL)
};

static const struct amba_id etb_ids[] = {
        {
                .id     = 0x0003b907,
                .mask   = 0x0003ffff,
        },
        { 0, 0},
};

static struct amba_driver etb_driver = {
        .drv = {
                .name   = "coresight-etb10",
                .owner  = THIS_MODULE,
                .pm     = &etb_dev_pm_ops,
                .suppress_bind_attrs = true,
        },
        .probe          = etb_probe,
        .id_table       = etb_ids,
};
builtin_amba_driver(etb_driver);