coresight-etm3x.c

/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <asm/sections.h>

#include "coresight-etm.h"

static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
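/*
 * Usage sketch (an assumption based on standard module parameter handling;
 * the exact prefix follows the module object name): tracing can be turned
 * on from boot with "coresight-etm3x.boot_enable=1" on the kernel command
 * line when the driver is built in, or with
 * "modprobe coresight-etm3x boot_enable=1" when built as a module.
 */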
/* The number of ETM/PTM currently registered */
static int etm_count;
static struct etm_drvdata *etmdrvdata[NR_CPUS];

static inline void etm_writel(struct etm_drvdata *drvdata,
                              u32 val, u32 off)
{
        if (drvdata->use_cp14) {
                if (etm_writel_cp14(off, val)) {
                        dev_err(drvdata->dev,
                                "invalid CP14 access to ETM reg: %#x", off);
                }
        } else {
                writel_relaxed(val, drvdata->base + off);
        }
}

static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
{
        u32 val = 0;    /* return a defined value if the CP14 read fails */

        if (drvdata->use_cp14) {
                if (etm_readl_cp14(off, &val)) {
                        dev_err(drvdata->dev,
                                "invalid CP14 access to ETM reg: %#x", off);
                }
        } else {
                val = readl_relaxed(drvdata->base + off);
        }

        return val;
}
/*
 * Memory mapped writes to clear os lock are not supported on some processors
 * and OS lock must be unlocked before any memory mapped access on such
 * processors, otherwise memory mapped reads/writes will be invalid.
 */
static void etm_os_unlock(void *info)
{
        struct etm_drvdata *drvdata = (struct etm_drvdata *)info;

        /* Writing any value to ETMOSLAR unlocks the trace registers */
        etm_writel(drvdata, 0x0, ETMOSLAR);
        isb();
}

static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
        u32 etmcr;

        /* Ensure pending cp14 accesses complete before setting pwrdwn */
        mb();
        isb();
        etmcr = etm_readl(drvdata, ETMCR);
        etmcr |= ETMCR_PWD_DWN;
        etm_writel(drvdata, etmcr, ETMCR);
}

static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
        u32 etmcr;

        etmcr = etm_readl(drvdata, ETMCR);
        etmcr &= ~ETMCR_PWD_DWN;
        etm_writel(drvdata, etmcr, ETMCR);
        /* Ensure pwrup completes before subsequent cp14 accesses */
        mb();
        isb();
}

static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
        u32 etmpdcr;

        etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
        etmpdcr |= ETMPDCR_PWD_UP;
        writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
        /* Ensure pwrup completes before subsequent cp14 accesses */
        mb();
        isb();
}

static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
        u32 etmpdcr;

        /* Ensure pending cp14 accesses complete before clearing pwrup */
        mb();
        isb();
        etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
        etmpdcr &= ~ETMPDCR_PWD_UP;
        writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
/**
 * coresight_timeout_etm - loop until a bit has changed to a specific state.
 * @drvdata: etm's private data structure.
 * @offset: register offset, relative to the ETM base address.
 * @position: the position of the bit of interest.
 * @value: the value the bit should have.
 *
 * Basically the same as @coresight_timeout except for the register access
 * method where we have to account for CP14 configurations.
 *
 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
 */
static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
                                 int position, int value)
{
        int i;
        u32 val;

        for (i = TIMEOUT_US; i > 0; i--) {
                val = etm_readl(drvdata, offset);
                /* Waiting on the bit to go from 0 to 1 */
                if (value) {
                        if (val & BIT(position))
                                return 0;
                /* Waiting on the bit to go from 1 to 0 */
                } else {
                        if (!(val & BIT(position)))
                                return 0;
                }

                /*
                 * Delay is arbitrary - the specification doesn't say how long
                 * we are expected to wait. Extra check required to make sure
                 * we don't wait needlessly on the last iteration.
                 */
                if (i - 1)
                        udelay(1);
        }

        return -EAGAIN;
}
static void etm_set_prog(struct etm_drvdata *drvdata)
{
        u32 etmcr;

        etmcr = etm_readl(drvdata, ETMCR);
        etmcr |= ETMCR_ETM_PRG;
        etm_writel(drvdata, etmcr, ETMCR);
        /*
         * Recommended by spec for cp14 accesses to ensure etmcr write is
         * complete before polling etmsr
         */
        isb();
        if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n", ETMSR);
        }
}

static void etm_clr_prog(struct etm_drvdata *drvdata)
{
        u32 etmcr;

        etmcr = etm_readl(drvdata, ETMCR);
        etmcr &= ~ETMCR_ETM_PRG;
        etm_writel(drvdata, etmcr, ETMCR);
        /*
         * Recommended by spec for cp14 accesses to ensure etmcr write is
         * complete before polling etmsr
         */
        isb();
        if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
                dev_err(drvdata->dev,
                        "timeout observed when probing at offset %#x\n", ETMSR);
        }
}
static void etm_set_default(struct etm_drvdata *drvdata)
{
        int i;

        drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->enable_event = ETM_HARD_WIRE_RES_A;

        drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
        drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;

        for (i = 0; i < drvdata->nr_cntr; i++) {
                drvdata->cntr_rld_val[i] = 0x0;
                drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
                drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
                drvdata->cntr_val[i] = 0x0;
        }

        drvdata->seq_curr_state = 0x0;
        drvdata->ctxid_idx = 0x0;
        for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
                drvdata->ctxid_val[i] = 0x0;
        drvdata->ctxid_mask = 0x0;
}
static void etm_enable_hw(void *info)
{
        int i;
        u32 etmcr;
        struct etm_drvdata *drvdata = info;

        CS_UNLOCK(drvdata->base);

        /* Turn engine on */
        etm_clr_pwrdwn(drvdata);
        /* Apply power to trace registers */
        etm_set_pwrup(drvdata);
        /* Make sure all registers are accessible */
        etm_os_unlock(drvdata);

        etm_set_prog(drvdata);

        etmcr = etm_readl(drvdata, ETMCR);
        etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
        etmcr |= drvdata->port_size;
        etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
        etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
        etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
        etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
        etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
        etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
        for (i = 0; i < drvdata->nr_addr_cmp; i++) {
                etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
                etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
        }
        for (i = 0; i < drvdata->nr_cntr; i++) {
                etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
                etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
                etm_writel(drvdata, drvdata->cntr_rld_event[i],
                           ETMCNTRLDEVRn(i));
                etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
        }
        etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
        etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
        etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
        etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
        etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
        etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
        etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
        for (i = 0; i < drvdata->nr_ext_out; i++)
                etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
        for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
                etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
        etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
        etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
        /* No external input selected */
        etm_writel(drvdata, 0x0, ETMEXTINSELR);
        etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
        /* No auxiliary control selected */
        etm_writel(drvdata, 0x0, ETMAUXCR);
        etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
        /* No VMID comparator value selected */
        etm_writel(drvdata, 0x0, ETMVMIDCVR);

        /* Ensures trace output is enabled from this ETM */
        etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

        etm_clr_prog(drvdata);
        CS_LOCK(drvdata->base);

        dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
static int etm_trace_id_simple(struct etm_drvdata *drvdata)
{
        if (!drvdata->enable)
                return drvdata->traceid;

        return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
}

static int etm_trace_id(struct coresight_device *csdev)
{
        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        unsigned long flags;
        int trace_id = -1;

        if (!drvdata->enable)
                return drvdata->traceid;

        if (clk_prepare_enable(drvdata->clk))
                goto out;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        CS_UNLOCK(drvdata->base);
        trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
        CS_LOCK(drvdata->base);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        clk_disable_unprepare(drvdata->clk);
out:
        return trace_id;
}
static int etm_enable(struct coresight_device *csdev)
{
        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        int ret;

        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                goto err_clk;

        spin_lock(&drvdata->spinlock);

        /*
         * Configure the ETM only if the CPU is online. If it isn't online
         * hw configuration will take place when 'CPU_STARTING' is received
         * in @etm_cpu_callback.
         */
        if (cpu_online(drvdata->cpu)) {
                ret = smp_call_function_single(drvdata->cpu,
                                               etm_enable_hw, drvdata, 1);
                if (ret)
                        goto err;
        }

        drvdata->enable = true;
        drvdata->sticky_enable = true;

        spin_unlock(&drvdata->spinlock);

        dev_info(drvdata->dev, "ETM tracing enabled\n");
        return 0;
err:
        spin_unlock(&drvdata->spinlock);
        clk_disable_unprepare(drvdata->clk);
err_clk:
        return ret;
}
static void etm_disable_hw(void *info)
{
        int i;
        struct etm_drvdata *drvdata = info;

        CS_UNLOCK(drvdata->base);
        etm_set_prog(drvdata);

        /* Program trace enable to low by using always false event */
        etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);

        /* Read back sequencer and counters for post trace analysis */
        drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

        for (i = 0; i < drvdata->nr_cntr; i++)
                drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

        etm_set_pwrdwn(drvdata);
        CS_LOCK(drvdata->base);

        dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm_disable(struct coresight_device *csdev)
{
        struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * Taking hotplug lock here protects from clocks getting disabled
         * with tracing being left on (crash scenario) if user disable occurs
         * after cpu online mask indicates the cpu is offline but before the
         * DYING hotplug callback is serviced by the ETM driver.
         */
        get_online_cpus();
        spin_lock(&drvdata->spinlock);

        /*
         * Executing etm_disable_hw on the cpu whose ETM is being disabled
         * ensures that register writes occur when cpu is powered.
         */
        smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
        drvdata->enable = false;

        spin_unlock(&drvdata->spinlock);
        put_online_cpus();

        clk_disable_unprepare(drvdata->clk);

        dev_info(drvdata->dev, "ETM tracing disabled\n");
}
static const struct coresight_ops_source etm_source_ops = {
        .trace_id       = etm_trace_id,
        .enable         = etm_enable,
        .disable        = etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
        .source_ops     = &etm_source_ops,
};
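/*
 * Usage sketch (assuming the standard CoreSight sysfs interface, where the
 * core exposes an "enable_src" file for trace sources): once a sink on the
 * trace path is active, the enable/disable callbacks above are reached with:
 *
 *      echo 1 > /sys/bus/coresight/devices/<etm-name>/enable_src
 *      echo 0 > /sys/bus/coresight/devices/<etm-name>/enable_src
 */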
static ssize_t nr_addr_cmp_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->nr_addr_cmp;
        return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->nr_cntr;
        return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->nr_ctxid_cmp;
        return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        int ret;
        unsigned long flags, val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                return ret;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        CS_UNLOCK(drvdata->base);

        val = etm_readl(drvdata, ETMSR);

        CS_LOCK(drvdata->base);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        clk_disable_unprepare(drvdata->clk);

        return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);
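/*
 * Usage sketch for the "reset" file below: writing any non-zero value puts
 * the configuration back to its defaults, e.g. (path assumes the usual
 * CoreSight sysfs layout):
 *
 *      echo 1 > /sys/bus/coresight/devices/<etm-name>/reset
 */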
static ssize_t reset_store(struct device *dev,
                           struct device_attribute *attr,
                           const char *buf, size_t size)
{
        int i, ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        if (val) {
                spin_lock(&drvdata->spinlock);
                drvdata->mode = ETM_MODE_EXCLUDE;
                drvdata->ctrl = 0x0;
                drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
                drvdata->startstop_ctrl = 0x0;
                drvdata->addr_idx = 0x0;
                for (i = 0; i < drvdata->nr_addr_cmp; i++) {
                        drvdata->addr_val[i] = 0x0;
                        drvdata->addr_acctype[i] = 0x0;
                        drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
                }
                drvdata->cntr_idx = 0x0;

                etm_set_default(drvdata);
                spin_unlock(&drvdata->spinlock);
        }

        return size;
}
static DEVICE_ATTR_WO(reset);
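/*
 * Usage sketch for the "mode" file below: it takes a hex bitmask assembled
 * from the ETM_MODE_* flags in coresight-etm.h. Assuming ETM_MODE_CYCACC
 * is BIT(1), cycle accurate tracing could be requested with:
 *
 *      echo 0x2 > /sys/bus/coresight/devices/<etm-name>/mode
 */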
static ssize_t mode_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->mode;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
                          struct device_attribute *attr,
                          const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->mode = val & ETM_MODE_ALL;

        if (drvdata->mode & ETM_MODE_EXCLUDE)
                drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
        else
                drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

        if (drvdata->mode & ETM_MODE_CYCACC)
                drvdata->ctrl |= ETMCR_CYC_ACC;
        else
                drvdata->ctrl &= ~ETMCR_CYC_ACC;

        if (drvdata->mode & ETM_MODE_STALL) {
                if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
                        dev_warn(drvdata->dev, "stall mode not supported\n");
                        ret = -EINVAL;
                        goto err_unlock;
                }
                drvdata->ctrl |= ETMCR_STALL_MODE;
        } else {
                drvdata->ctrl &= ~ETMCR_STALL_MODE;
        }

        if (drvdata->mode & ETM_MODE_TIMESTAMP) {
                if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
                        dev_warn(drvdata->dev, "timestamp not supported\n");
                        ret = -EINVAL;
                        goto err_unlock;
                }
                drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
        } else {
                drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
        }

        if (drvdata->mode & ETM_MODE_CTXID)
                drvdata->ctrl |= ETMCR_CTXID_SIZE;
        else
                drvdata->ctrl &= ~ETMCR_CTXID_SIZE;

        spin_unlock(&drvdata->spinlock);
        return size;

err_unlock:
        spin_unlock(&drvdata->spinlock);
        return ret;
}
static DEVICE_ATTR_RW(mode);
static ssize_t trigger_event_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->trigger_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->enable_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->enable_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->fifofull_level;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->fifofull_level = val;
        return size;
}
static DEVICE_ATTR_RW(fifofull_level);
static ssize_t addr_idx_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->addr_idx;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        if (val >= drvdata->nr_addr_cmp)
                return -EINVAL;

        /*
         * Use spinlock to ensure index doesn't change while it gets
         * dereferenced multiple times within a spinlock block elsewhere.
         */
        spin_lock(&drvdata->spinlock);
        drvdata->addr_idx = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_single_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        u8 idx;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
                spin_unlock(&drvdata->spinlock);
                return -EINVAL;
        }

        val = drvdata->addr_val[idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        u8 idx;
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
                spin_unlock(&drvdata->spinlock);
                return -EINVAL;
        }

        drvdata->addr_val[idx] = val;
        drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_single);
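/*
 * Usage sketch for the "addr_range" file below: it takes two hex
 * addresses, low then high, applied to the even/odd comparator pair
 * selected by addr_idx (addresses are placeholders):
 *
 *      echo 0 > addr_idx
 *      echo 0xc0008000 0xc0100000 > addr_range
 */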
static ssize_t addr_range_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        u8 idx;
        unsigned long val1, val2;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (idx % 2 != 0) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }
        if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
               drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
              (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
               drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        val1 = drvdata->addr_val[idx];
        val2 = drvdata->addr_val[idx + 1];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t size)
{
        u8 idx;
        unsigned long val1, val2;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
                return -EINVAL;
        /* Lower address comparator cannot have a higher address value */
        if (val1 > val2)
                return -EINVAL;

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (idx % 2 != 0) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }
        if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
               drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
              (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
               drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        drvdata->addr_val[idx] = val1;
        drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
        drvdata->addr_val[idx + 1] = val2;
        drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
        drvdata->enable_ctrl1 |= (1 << (idx / 2));
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_range);
static ssize_t addr_start_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        u8 idx;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        val = drvdata->addr_val[idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t size)
{
        u8 idx;
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        drvdata->addr_val[idx] = val;
        drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
        drvdata->startstop_ctrl |= (1 << idx);
        drvdata->enable_ctrl1 |= BIT(25);
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        u8 idx;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        val = drvdata->addr_val[idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t size)
{
        u8 idx;
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        idx = drvdata->addr_idx;
        if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
              drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
                spin_unlock(&drvdata->spinlock);
                return -EPERM;
        }

        drvdata->addr_val[idx] = val;
        drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
        drvdata->startstop_ctrl |= (1 << (idx + 16));
        drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_stop);
static ssize_t addr_acctype_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        val = drvdata->addr_acctype[drvdata->addr_idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->addr_acctype[drvdata->addr_idx] = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(addr_acctype);

static ssize_t cntr_idx_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->cntr_idx;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        if (val >= drvdata->nr_cntr)
                return -EINVAL;

        /*
         * Use spinlock to ensure index doesn't change while it gets
         * dereferenced multiple times within a spinlock block elsewhere.
         */
        spin_lock(&drvdata->spinlock);
        drvdata->cntr_idx = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        val = drvdata->cntr_rld_val[drvdata->cntr_idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        val = drvdata->cntr_event[drvdata->cntr_idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        val = drvdata->cntr_rld_event[drvdata->cntr_idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);
static ssize_t cntr_val_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        int i, ret = 0;
        u32 val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        if (!drvdata->enable) {
                spin_lock(&drvdata->spinlock);
                for (i = 0; i < drvdata->nr_cntr; i++)
                        /* Append at buf + ret so lines aren't overwritten */
                        ret += sprintf(buf + ret, "counter %d: %x\n",
                                       i, drvdata->cntr_val[i]);
                spin_unlock(&drvdata->spinlock);
                return ret;
        }

        for (i = 0; i < drvdata->nr_cntr; i++) {
                val = etm_readl(drvdata, ETMCNTVRn(i));
                ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
        }

        return ret;
}

static ssize_t cntr_val_store(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->cntr_val[drvdata->cntr_idx] = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(cntr_val);
static ssize_t seq_12_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_12_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_12_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_21_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_21_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_23_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_23_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_31_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_31_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_32_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_32_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->seq_13_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->seq_13_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        int ret;
        unsigned long val, flags;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        if (!drvdata->enable) {
                val = drvdata->seq_curr_state;
                goto out;
        }

        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                return ret;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        CS_UNLOCK(drvdata->base);
        val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
        CS_LOCK(drvdata->base);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        clk_disable_unprepare(drvdata->clk);
out:
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        if (val > ETM_SEQ_STATE_MAX_VAL)
                return -EINVAL;

        drvdata->seq_curr_state = val;

        return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
static ssize_t ctxid_idx_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->ctxid_idx;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        if (val >= drvdata->nr_ctxid_cmp)
                return -EINVAL;

        /*
         * Use spinlock to ensure index doesn't change while it gets
         * dereferenced multiple times within a spinlock block elsewhere.
         */
        spin_lock(&drvdata->spinlock);
        drvdata->ctxid_idx = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_val_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        spin_lock(&drvdata->spinlock);
        val = drvdata->ctxid_val[drvdata->ctxid_idx];
        spin_unlock(&drvdata->spinlock);

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_val_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        spin_lock(&drvdata->spinlock);
        drvdata->ctxid_val[drvdata->ctxid_idx] = val;
        spin_unlock(&drvdata->spinlock);

        return size;
}
static DEVICE_ATTR_RW(ctxid_val);

static ssize_t ctxid_mask_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->ctxid_mask;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->ctxid_mask = val;
        return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->sync_freq;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->sync_freq = val & ETM_SYNC_MASK;
        return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        val = drvdata->timestamp_event;
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->timestamp_event = val & ETM_EVENT_MASK;
        return size;
}
static DEVICE_ATTR_RW(timestamp_event);
static ssize_t status_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        int ret;
        unsigned long flags;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                return ret;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        CS_UNLOCK(drvdata->base);
        ret = sprintf(buf,
                      "ETMCCR: 0x%08x\n"
                      "ETMCCER: 0x%08x\n"
                      "ETMSCR: 0x%08x\n"
                      "ETMIDR: 0x%08x\n"
                      "ETMCR: 0x%08x\n"
                      "ETMTRACEIDR: 0x%08x\n"
                      "Enable event: 0x%08x\n"
                      "Enable start/stop: 0x%08x\n"
                      "Enable control: CR1 0x%08x CR2 0x%08x\n"
                      "CPU affinity: %d\n",
                      drvdata->etmccr, drvdata->etmccer,
                      etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
                      etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
                      etm_readl(drvdata, ETMTEEVR),
                      etm_readl(drvdata, ETMTSSCR),
                      etm_readl(drvdata, ETMTECR1),
                      etm_readl(drvdata, ETMTECR2),
                      drvdata->cpu);
        CS_LOCK(drvdata->base);

        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        clk_disable_unprepare(drvdata->clk);

        return ret;
}
static DEVICE_ATTR_RO(status);
static ssize_t traceid_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        int ret;
        unsigned long val, flags;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        if (!drvdata->enable) {
                val = drvdata->traceid;
                goto out;
        }

        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                return ret;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        CS_UNLOCK(drvdata->base);

        val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);

        CS_LOCK(drvdata->base);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
        clk_disable_unprepare(drvdata->clk);
out:
        return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->traceid = val & ETM_TRACEID_MASK;
        return size;
}
static DEVICE_ATTR_RW(traceid);
static struct attribute *coresight_etm_attrs[] = {
        &dev_attr_nr_addr_cmp.attr,
        &dev_attr_nr_cntr.attr,
        &dev_attr_nr_ctxid_cmp.attr,
        &dev_attr_etmsr.attr,
        &dev_attr_reset.attr,
        &dev_attr_mode.attr,
        &dev_attr_trigger_event.attr,
        &dev_attr_enable_event.attr,
        &dev_attr_fifofull_level.attr,
        &dev_attr_addr_idx.attr,
        &dev_attr_addr_single.attr,
        &dev_attr_addr_range.attr,
        &dev_attr_addr_start.attr,
        &dev_attr_addr_stop.attr,
        &dev_attr_addr_acctype.attr,
        &dev_attr_cntr_idx.attr,
        &dev_attr_cntr_rld_val.attr,
        &dev_attr_cntr_event.attr,
        &dev_attr_cntr_rld_event.attr,
        &dev_attr_cntr_val.attr,
        &dev_attr_seq_12_event.attr,
        &dev_attr_seq_21_event.attr,
        &dev_attr_seq_23_event.attr,
        &dev_attr_seq_31_event.attr,
        &dev_attr_seq_32_event.attr,
        &dev_attr_seq_13_event.attr,
        &dev_attr_seq_curr_state.attr,
        &dev_attr_ctxid_idx.attr,
        &dev_attr_ctxid_val.attr,
        &dev_attr_ctxid_mask.attr,
        &dev_attr_sync_freq.attr,
        &dev_attr_timestamp_event.attr,
        &dev_attr_status.attr,
        &dev_attr_traceid.attr,
        NULL,
};
ATTRIBUTE_GROUPS(coresight_etm);
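/*
 * The attributes above are handed to the coresight core through the
 * "groups" field of the descriptor in etm_probe() and, assuming the usual
 * CoreSight sysfs layout, appear under
 * /sys/bus/coresight/devices/<etm-name>/.
 */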
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
                            void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (!etmdrvdata[cpu])
                goto out;

        switch (action & (~CPU_TASKS_FROZEN)) {
        case CPU_STARTING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
                if (!etmdrvdata[cpu]->os_unlock) {
                        etm_os_unlock(etmdrvdata[cpu]);
                        etmdrvdata[cpu]->os_unlock = true;
                }

                if (etmdrvdata[cpu]->enable)
                        etm_enable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;

        case CPU_ONLINE:
                if (etmdrvdata[cpu]->boot_enable &&
                    !etmdrvdata[cpu]->sticky_enable)
                        coresight_enable(etmdrvdata[cpu]->csdev);
                break;

        case CPU_DYING:
                spin_lock(&etmdrvdata[cpu]->spinlock);
                if (etmdrvdata[cpu]->enable)
                        etm_disable_hw(etmdrvdata[cpu]);
                spin_unlock(&etmdrvdata[cpu]->spinlock);
                break;
        }
out:
        return NOTIFY_OK;
}

static struct notifier_block etm_cpu_notifier = {
        .notifier_call = etm_cpu_callback,
};
static bool etm_arch_supported(u8 arch)
{
        switch (arch) {
        case ETM_ARCH_V3_3:
                break;
        case ETM_ARCH_V3_5:
                break;
        case PFT_ARCH_V1_0:
                break;
        case PFT_ARCH_V1_1:
                break;
        default:
                return false;
        }
        return true;
}
static void etm_init_arch_data(void *info)
{
        u32 etmidr;
        u32 etmccr;
        struct etm_drvdata *drvdata = info;

        CS_UNLOCK(drvdata->base);

        /* First dummy read */
        (void)etm_readl(drvdata, ETMPDSR);
        /* Provide power to ETM: ETMPDCR[3] == 1 */
        etm_set_pwrup(drvdata);
        /*
         * Clear power down bit since when this bit is set writes to
         * certain registers might be ignored.
         */
        etm_clr_pwrdwn(drvdata);
        /*
         * Set prog bit. It will be set from reset but this is included to
         * ensure it is set
         */
        etm_set_prog(drvdata);

        /* Find all capabilities */
        etmidr = etm_readl(drvdata, ETMIDR);
        drvdata->arch = BMVAL(etmidr, 4, 11);
        drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

        drvdata->etmccer = etm_readl(drvdata, ETMCCER);
        etmccr = etm_readl(drvdata, ETMCCR);
        drvdata->etmccr = etmccr;
        drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
        drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
        drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
        drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
        drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

        etm_set_pwrdwn(drvdata);
        etm_clr_pwrup(drvdata);
        CS_LOCK(drvdata->base);
}
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
        /*
         * A trace ID of value 0 is invalid, so let's start at some
         * random value that fits in 7 bits and will be just as good.
         */
        static int etm3x_traceid = 0x10;

        u32 flags = (1 << 0 |   /* instruction execute */
                     3 << 3 |   /* ARM instruction */
                     0 << 5 |   /* No data value comparison */
                     0 << 7 |   /* No exact match */
                     0 << 8 |   /* Ignore context ID */
                     0 << 10);  /* Security ignored */

        /*
         * Initial configuration only - guarantees sources handled by
         * this driver have a unique ID at startup time but not between
         * all other types of sources. For that we lean on the core
         * framework.
         */
        drvdata->traceid = etm3x_traceid++;
        drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
        drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
        if (drvdata->nr_addr_cmp >= 2) {
                drvdata->addr_val[0] = (u32) _stext;
                drvdata->addr_val[1] = (u32) _etext;
                drvdata->addr_acctype[0] = flags;
                drvdata->addr_acctype[1] = flags;
                drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
                drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
        }

        etm_set_default(drvdata);
}
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
        int ret;
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct coresight_platform_data *pdata = NULL;
        struct etm_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct coresight_desc *desc;
        struct device_node *np = adev->dev.of_node;

        desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        if (np) {
                pdata = of_get_coresight_platform_data(dev, np);
                if (IS_ERR(pdata))
                        return PTR_ERR(pdata);

                adev->dev.platform_data = pdata;
                drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
        }

        drvdata->dev = &adev->dev;
        dev_set_drvdata(dev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        spin_lock_init(&drvdata->spinlock);

        drvdata->clk = adev->pclk;
        ret = clk_prepare_enable(drvdata->clk);
        if (ret)
                return ret;

        drvdata->cpu = pdata ? pdata->cpu : 0;

        get_online_cpus();
        etmdrvdata[drvdata->cpu] = drvdata;

        if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
                drvdata->os_unlock = true;

        if (smp_call_function_single(drvdata->cpu,
                                     etm_init_arch_data, drvdata, 1))
                dev_err(dev, "ETM arch init failed\n");

        if (!etm_count++)
                register_hotcpu_notifier(&etm_cpu_notifier);

        put_online_cpus();

        if (!etm_arch_supported(drvdata->arch)) {
                ret = -EINVAL;
                goto err_arch_supported;
        }
        etm_init_default_data(drvdata);

        clk_disable_unprepare(drvdata->clk);

        desc->type = CORESIGHT_DEV_TYPE_SOURCE;
        desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
        desc->ops = &etm_cs_ops;
        desc->pdata = pdata;
        desc->dev = dev;
        desc->groups = coresight_etm_groups;
        drvdata->csdev = coresight_register(desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
                /* The clock is already off here, skip the second disable */
                goto err_coresight_register;
        }

        dev_info(dev, "ETM initialized\n");

        if (boot_enable) {
                coresight_enable(drvdata->csdev);
                drvdata->boot_enable = true;
        }

        return 0;

err_arch_supported:
        clk_disable_unprepare(drvdata->clk);
err_coresight_register:
        if (--etm_count == 0)
                unregister_hotcpu_notifier(&etm_cpu_notifier);
        return ret;
}
static int etm_remove(struct amba_device *adev)
{
        struct etm_drvdata *drvdata = amba_get_drvdata(adev);

        coresight_unregister(drvdata->csdev);
        if (--etm_count == 0)
                unregister_hotcpu_notifier(&etm_cpu_notifier);

        return 0;
}

static struct amba_id etm_ids[] = {
        {       /* ETM 3.3 */
                .id     = 0x0003b921,
                .mask   = 0x0003ffff,
        },
        {       /* ETM 3.5 */
                .id     = 0x0003b956,
                .mask   = 0x0003ffff,
        },
        {       /* PTM 1.0 */
                .id     = 0x0003b950,
                .mask   = 0x0003ffff,
        },
        {       /* PTM 1.1 */
                .id     = 0x0003b95f,
                .mask   = 0x0003ffff,
        },
        { 0, 0},
};
static struct amba_driver etm_driver = {
        .drv = {
                .name   = "coresight-etm3x",
                .owner  = THIS_MODULE,
        },
        .probe          = etm_probe,
        .remove         = etm_remove,
        .id_table       = etm_ids,
};

static int __init etm_init(void)
{
        return amba_driver_register(&etm_driver);
}
module_init(etm_init);

static void __exit etm_exit(void)
{
        amba_driver_unregister(&etm_driver);
}
module_exit(etm_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");