/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <asm/sections.h>

#include "coresight-etm.h"
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETM/PTM currently registered */
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];

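/*
 * Register accessors: when the device tree node carries the "arm,cp14"
 * property (see etm_probe() below), the trace registers are reached via
 * CP14 coprocessor instructions rather than the memory-mapped interface.
 * These helpers hide that distinction from the rest of the driver.
 */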
static inline void etm_writel(struct etm_drvdata *drvdata,
			      u32 val, u32 off)
{
	if (drvdata->use_cp14) {
		if (etm_writel_cp14(off, val)) {
			dev_err(drvdata->dev,
				"invalid CP14 access to ETM reg: %#x", off);
		}
	} else {
		writel_relaxed(val, drvdata->base + off);
	}
}

static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
{
	u32 val = 0;	/* return 0, not stack garbage, if the CP14 read fails */

	if (drvdata->use_cp14) {
		if (etm_readl_cp14(off, &val)) {
			dev_err(drvdata->dev,
				"invalid CP14 access to ETM reg: %#x", off);
		}
	} else {
		val = readl_relaxed(drvdata->base + off);
	}

	return val;
}

/*
 * Memory mapped writes to clear os lock are not supported on some processors
 * and OS lock must be unlocked before any memory mapped access on such
 * processors, otherwise memory mapped reads/writes will be invalid.
 */
static void etm_os_unlock(void *info)
{
	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	isb();
}

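/*
 * Power control happens at two levels. ETMPDCR gates power to the trace
 * registers and lives only in the memory-mapped interface, hence the
 * direct readl_relaxed()/writel_relaxed() in the pwrup helpers below.
 * The ETMCR power down bit is part of the ETM programming model proper,
 * so it goes through etm_readl()/etm_writel() and works on CP14 parts too.
 */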
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}

static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}

/**
 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 * @drvdata: etm's private data structure.
 * @offset: offset of the register to poll, relative to the ETM base address.
 * @position: the position of the bit of interest.
 * @value: the value the bit should have.
 *
 * Basically the same as @coresight_timeout except for the register access
 * method where we have to account for CP14 configurations.
 *
 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
				 int position, int value)
{
	int i;
	u32 val;

	for (i = TIMEOUT_US; i > 0; i--) {
		val = etm_readl(drvdata, offset);
		/* Waiting on the bit to go from 0 to 1 */
		if (value) {
			if (val & BIT(position))
				return 0;
		/* Waiting on the bit to go from 1 to 0 */
		} else {
			if (!(val & BIT(position)))
				return 0;
		}

		/*
		 * Delay is arbitrary - the specification doesn't say how long
		 * we are expected to wait. Extra check required to make sure
		 * we don't wait needlessly on the last iteration.
		 */
		if (i - 1)
			udelay(1);
	}

	return -EAGAIN;
}

static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n", ETMSR);
	}
}

static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n", ETMSR);
	}
}

static void etm_set_default(struct etm_drvdata *drvdata)
{
	int i;

	drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->enable_event = ETM_HARD_WIRE_RES_A;

	drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
	drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;

	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntr_rld_val[i] = 0x0;
		drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
		drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
		drvdata->cntr_val[i] = 0x0;
	}

	drvdata->seq_curr_state = 0x0;
	drvdata->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask = 0x0;
}

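/*
 * Programming sequence used below: the ETM is taken out of power down,
 * powered up and OS-unlocked, then put in "programming" mode (ETMCR.Prog
 * set, ETMSR polled) before any configuration registers are written;
 * clearing the prog bit at the end is what actually starts tracing.
 * This runs via smp_call_function_single() so that every register access
 * happens on the CPU that owns this ETM.
 */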
static void etm_enable_hw(void *info)
{
	int i;
	u32 etmcr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	etm_set_prog(drvdata);

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
	etmcr |= drvdata->port_size;
	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
	}
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, drvdata->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
	}
	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);
	/* Ensures trace output is enabled from this ETM */
	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

	etm_clr_prog(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}

static int etm_trace_id_simple(struct etm_drvdata *drvdata)
{
	if (!drvdata->enable)
		return drvdata->traceid;

	return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
}

static int etm_trace_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata->enable)
		return drvdata->traceid;

	pm_runtime_get_sync(csdev->dev.parent);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(csdev->dev.parent);

	return trace_id;
}

static int etm_enable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(csdev->dev.parent);
	spin_lock(&drvdata->spinlock);

	/*
	 * Configure the ETM only if the CPU is online. If it isn't online
	 * hw configuration will take place when 'CPU_STARTING' is received
	 * in @etm_cpu_callback.
	 */
	if (cpu_online(drvdata->cpu)) {
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw, drvdata, 1);
		if (ret)
			goto err;
	}

	drvdata->enable = true;
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;

err:
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(csdev->dev.parent);
	return ret;
}

static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Program trace enable to low by using always false event */
	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);

	/* Read back sequencer and counters for post trace analysis */
	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm_disable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();
	pm_runtime_put(csdev->dev.parent);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}

static const struct coresight_ops_source etm_source_ops = {
	.trace_id	= etm_trace_id,
	.enable		= etm_enable,
	.disable	= etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops	= &etm_source_ops,
};

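/*
 * These ops are not called directly: the coresight core invokes them when
 * a trace session is started on a path that has this ETM as its source
 * (see the coresight_enable() call in etm_cpu_callback() below for the
 * boot-enable case).
 */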
static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		drvdata->mode = ETM_MODE_EXCLUDE;
		drvdata->ctrl = 0x0;
		drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
		drvdata->startstop_ctrl = 0x0;
		drvdata->addr_idx = 0x0;
		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
			drvdata->addr_val[i] = 0x0;
			drvdata->addr_acctype[i] = 0x0;
			drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
		}
		drvdata->cntr_idx = 0x0;

		etm_set_default(drvdata);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETM_MODE_ALL;

	if (drvdata->mode & ETM_MODE_EXCLUDE)
		drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (drvdata->mode & ETM_MODE_CYCACC)
		drvdata->ctrl |= ETMCR_CYC_ACC;
	else
		drvdata->ctrl &= ~ETMCR_CYC_ACC;

	if (drvdata->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		drvdata->ctrl |= ETMCR_STALL_MODE;
	} else
		drvdata->ctrl &= ~ETMCR_STALL_MODE;

	if (drvdata->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (drvdata->mode & ETM_MODE_CTXID)
		drvdata->ctrl |= ETMCR_CTXID_SIZE;
	else
		drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);

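/*
 * 'mode' is a bitmask: as the tests above show, exclude, cycle-accurate,
 * stall, timestamp and context-ID tracing each map to one ETM_MODE_* flag
 * and may be combined in a single write. A sketch (hypothetical values,
 * assuming ETM_MODE_EXCLUDE is bit 0 and ETM_MODE_CYCACC is bit 1):
 *
 *	echo 0x3 > /sys/bus/coresight/devices/<etm-name>/mode
 *
 * would request exclude + cycle-accurate tracing at once.
 */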
static ssize_t trigger_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->enable_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->fifofull_level = val;
	return size;
}
static DEVICE_ATTR_RW(fifofull_level);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	drvdata->addr_val[idx] = val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = drvdata->addr_val[idx];
	val2 = drvdata->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	drvdata->enable_ctrl1 |= (1 << (idx / 2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);

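/*
 * Address range comparators are consumed in even/odd pairs, which is why
 * addr_range_store() insists on an even addr_idx. A minimal sketch of a
 * session setup (hypothetical device name and addresses):
 *
 *	cd /sys/bus/coresight/devices/<etm-name>
 *	echo 0 > addr_idx
 *	echo 'c0008000 c0100000' > addr_range
 *
 * which programs comparators 0 and 1 to bound the traced address range.
 */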
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
	drvdata->startstop_ctrl |= (1 << idx);
	drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	drvdata->startstop_ctrl |= (1 << (idx + 16));
	drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val = drvdata->addr_acctype[drvdata->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->addr_acctype[drvdata->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val = drvdata->cntr_rld_val[drvdata->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val = drvdata->cntr_event[drvdata->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val = drvdata->cntr_rld_event[drvdata->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			/* append at buf + ret so counters don't overwrite each other */
			ret += sprintf(buf + ret, "counter %d: %x\n",
				       i, drvdata->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->cntr_val[drvdata->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	drvdata->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	/*
	 * Deliberately report the vpid, i.e. the pid as the writer of this
	 * file saw it from its PID namespace; the translated pid is what
	 * actually gets programmed into the comparator in etm_enable_hw().
	 */
	val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long vpid, pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &vpid);
	if (ret)
		return ret;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
	drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	ret = sprintf(buf,
		      "ETMCCR: 0x%08x\n"
		      "ETMCCER: 0x%08x\n"
		      "ETMSCR: 0x%08x\n"
		      "ETMIDR: 0x%08x\n"
		      "ETMCR: 0x%08x\n"
		      "ETMTRACEIDR: 0x%08x\n"
		      "Enable event: 0x%08x\n"
		      "Enable start/stop: 0x%08x\n"
		      "Enable control: CR1 0x%08x CR2 0x%08x\n"
		      "CPU affinity: %d\n",
		      drvdata->etmccr, drvdata->etmccer,
		      etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
		      etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
		      etm_readl(drvdata, ETMTEEVR),
		      etm_readl(drvdata, ETMTSSCR),
		      etm_readl(drvdata, ETMTECR1),
		      etm_readl(drvdata, ETMTECR2),
		      drvdata->cpu);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return ret;
}
static DEVICE_ATTR_RO(status);

static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->traceid;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);

static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_status.attr,
	&dev_attr_traceid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etm);

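/*
 * All of the attributes above end up under the coresight device in sysfs.
 * A minimal sketch of a trace session driven from userspace, assuming the
 * standard CoreSight core controls (enable_source/enable_sink) and
 * hypothetical device names:
 *
 *	cd /sys/bus/coresight/devices/<etm-name>
 *	echo 1 > reset				# back to driver defaults
 *	echo 1 > ../<sink-name>/enable_sink	# pick a sink on the path
 *	echo 1 > enable_source			# starts tracing, etm_enable()
 *	echo 0 > enable_source			# stops it, etm_disable()
 */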
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		if (etmdrvdata[cpu]->enable)
			etm_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm_cpu_notifier = {
	.notifier_call = etm_cpu_callback,
};

static bool etm_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V3_3:
		break;
	case ETM_ARCH_V3_5:
		break;
	case PFT_ARCH_V1_0:
		break;
	case PFT_ARCH_V1_1:
		break;
	default:
		return false;
	}
	return true;
}

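/*
 * Discover what this ETM/PTM implementation provides. As the code below
 * shows, ETMIDR[11:4] carries the architecture version checked by
 * etm_arch_supported(), while ETMCCR advertises the resources: bits [3:0]
 * count address comparator *pairs* (hence the multiply by two), [15:13]
 * the counters, [19:17]/[22:20] the external inputs/outputs and [25:24]
 * the context ID comparators.
 */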
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear the power down bit: while it is set, writes to certain
	 * registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set the prog bit.  It should already be set out of reset, but
	 * set it again here to make sure.
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	/* ETMCCR[3:0] counts comparator pairs, each pair is two comparators */
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}
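
/*
 * Pick a sane default configuration: a driver-local trace ID,
 * cycle-accurate timestamped tracing and, when the implementation has
 * at least one address comparator pair, an address range filter
 * covering the kernel text section (_stext to _etext).
 */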
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
	/*
	 * A trace ID of value 0 is invalid, so let's start at some
	 * random value that fits in 7 bits and will be just as good.
	 */
	static int etm3x_traceid = 0x10;

	u32 flags = (1 << 0 |	/* instruction execute */
		     3 << 3 |	/* ARM instruction */
		     0 << 5 |	/* No data value comparison */
		     0 << 7 |	/* No exact match */
		     0 << 8 |	/* Ignore context ID */
		     0 << 10);	/* Security ignored */

	/*
	 * Initial configuration only - guarantees sources handled by
	 * this driver have a unique ID at startup time but not between
	 * all other types of sources. For that we lean on the core
	 * framework.
	 */
	drvdata->traceid = etm3x_traceid++;
	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
	if (drvdata->nr_addr_cmp >= 2) {
		drvdata->addr_val[0] = (u32) _stext;
		drvdata->addr_val[1] = (u32) _etext;
		drvdata->addr_acctype[0] = flags;
		drvdata->addr_acctype[1] = flags;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	etm_set_default(drvdata);
}
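
/*
 * AMBA bus probe: map the device, discover its capabilities on the CPU
 * it is bound to, then register it with the coresight framework.  The
 * first ETM probed also registers the CPU hotplug notifier; if the
 * boot_enable parameter was set, tracing is started immediately.
 */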
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);

	put_online_cpus();

	if (!etm_arch_supported(drvdata->arch)) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	pm_runtime_put(&adev->dev);

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}

static int etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);

	return 0;
}
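
/*
 * Runtime PM only needs to gate the trace bus clock ("atclk").  Since
 * that clock is optional, drvdata->atclk may hold an error pointer
 * from devm_clk_get(), hence the IS_ERR() checks.
 */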
#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_disable_unprepare(drvdata->atclk);

	return 0;
}

static int etm_runtime_resume(struct device *dev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(dev);

	if (drvdata && !IS_ERR(drvdata->atclk))
		clk_prepare_enable(drvdata->atclk);

	return 0;
}
#endif

static const struct dev_pm_ops etm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(etm_runtime_suspend, etm_runtime_resume, NULL)
};
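
/*
 * AMBA peripheral IDs this driver binds against.  The 0x0003ffff mask
 * leaves the revision bits out of the comparison so any revision of a
 * given part matches; .data carries the name printed at probe time.
 */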
static struct amba_id etm_ids[] = {
	{	/* ETM 3.3 */
		.id	= 0x0003b921,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.3",
	},
	{	/* ETM 3.5 */
		.id	= 0x0003b956,
		.mask	= 0x0003ffff,
		.data	= "ETM 3.5",
	},
	{	/* PTM 1.0 */
		.id	= 0x0003b950,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.0",
	},
	{	/* PTM 1.1 */
		.id	= 0x0003b95f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{	/* PTM 1.1 Qualcomm */
		.id	= 0x0003006f,
		.mask	= 0x0003ffff,
		.data	= "PTM 1.1",
	},
	{ 0, 0},
};

static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
		.pm	= &etm_dev_pm_ops,
	},
	.probe		= etm_probe,
	.remove		= etm_remove,
	.id_table	= etm_ids,
};

module_amba_driver(etm_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");