/* coresight-etm4x.c */
  1. /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/moduleparam.h>
  14. #include <linux/init.h>
  15. #include <linux/types.h>
  16. #include <linux/device.h>
  17. #include <linux/io.h>
  18. #include <linux/err.h>
  19. #include <linux/fs.h>
  20. #include <linux/slab.h>
  21. #include <linux/delay.h>
  22. #include <linux/smp.h>
  23. #include <linux/sysfs.h>
  24. #include <linux/stat.h>
  25. #include <linux/clk.h>
  26. #include <linux/cpu.h>
  27. #include <linux/coresight.h>
  28. #include <linux/pm_wakeup.h>
  29. #include <linux/amba/bus.h>
  30. #include <linux/seq_file.h>
  31. #include <linux/uaccess.h>
  32. #include <linux/pm_runtime.h>
  33. #include <linux/perf_event.h>
  34. #include <asm/sections.h>
  35. #include "coresight-etm4x.h"
/* Non-zero to start tracing at boot; exposed read-only as a module parameter */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
/* The number of ETMv4 currently registered */
static int etm4_count;
/* Per-CPU driver data, indexed by the CPU number the ETM is affined to */
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
  41. static void etm4_os_unlock(void *info)
  42. {
  43. struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
  44. /* Writing any value to ETMOSLAR unlocks the trace registers */
  45. writel_relaxed(0x0, drvdata->base + TRCOSLAR);
  46. isb();
  47. }
  48. static bool etm4_arch_supported(u8 arch)
  49. {
  50. switch (arch) {
  51. case ETM_ARCH_V4:
  52. break;
  53. default:
  54. return false;
  55. }
  56. return true;
  57. }
  58. static int etm4_cpu_id(struct coresight_device *csdev)
  59. {
  60. struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  61. return drvdata->cpu;
  62. }
  63. static int etm4_trace_id(struct coresight_device *csdev)
  64. {
  65. struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  66. unsigned long flags;
  67. int trace_id = -1;
  68. if (!drvdata->enable)
  69. return drvdata->trcid;
  70. spin_lock_irqsave(&drvdata->spinlock, flags);
  71. CS_UNLOCK(drvdata->base);
  72. trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
  73. trace_id &= ETM_TRACEID_MASK;
  74. CS_LOCK(drvdata->base);
  75. spin_unlock_irqrestore(&drvdata->spinlock, flags);
  76. return trace_id;
  77. }
/*
 * etm4_enable_hw - program the cached configuration and start the trace unit.
 * @info: etmv4_drvdata pointer; runs via smp_call_function_single() on the
 *        CPU that owns this ETM so the accesses hit a powered trace unit.
 *
 * The unit is first disabled (TRCPRGCTLR.EN = 0) and we wait for
 * TRCSTATR.IDLE before writing any configuration register, then EN is set
 * and we wait for IDLE to drop, per the programming sequence the two
 * coresight_timeout() polls implement.
 */
static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);
	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(drvdata->viiectlr,
		       drvdata->base + TRCVIIECTLR);
	writel_relaxed(drvdata->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(drvdata->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	/* only nrseqstate - 1 sequencer event registers (TRCSEQEVRn) exist */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(drvdata->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
	/* counter reload value, control and current value per counter */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(drvdata->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(drvdata->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(drvdata->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is always implemented and reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		writel_relaxed(drvdata->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	/* single-shot comparator control, status and PE comparator inputs */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(drvdata->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(drvdata->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(drvdata->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	/* address comparator value and access-type registers (64-bit) */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(drvdata->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(drvdata->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(drvdata->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(drvdata->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	CS_LOCK(drvdata->base);
	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
  161. static int etm4_enable(struct coresight_device *csdev,
  162. struct perf_event_attr *attr, u32 mode)
  163. {
  164. struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  165. int ret;
  166. spin_lock(&drvdata->spinlock);
  167. /*
  168. * Executing etm4_enable_hw on the cpu whose ETM is being enabled
  169. * ensures that register writes occur when cpu is powered.
  170. */
  171. ret = smp_call_function_single(drvdata->cpu,
  172. etm4_enable_hw, drvdata, 1);
  173. if (ret)
  174. goto err;
  175. drvdata->enable = true;
  176. drvdata->sticky_enable = true;
  177. spin_unlock(&drvdata->spinlock);
  178. dev_info(drvdata->dev, "ETM tracing enabled\n");
  179. return 0;
  180. err:
  181. spin_unlock(&drvdata->spinlock);
  182. return ret;
  183. }
/*
 * etm4_disable_hw - stop the trace unit.
 * @info: etmv4_drvdata pointer; runs via smp_call on the owning CPU.
 *
 * Clears TRCPRGCTLR.EN.  The mb()/isb() pair ahead of the write is
 * deliberate: all outstanding accesses must complete before the unit is
 * taken out of the enabled state.
 */
static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;
	CS_UNLOCK(drvdata->base);
	control = readl_relaxed(drvdata->base + TRCPRGCTLR);
	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;
	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);
	CS_LOCK(drvdata->base);
	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
  199. static void etm4_disable(struct coresight_device *csdev)
  200. {
  201. struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
  202. /*
  203. * Taking hotplug lock here protects from clocks getting disabled
  204. * with tracing being left on (crash scenario) if user disable occurs
  205. * after cpu online mask indicates the cpu is offline but before the
  206. * DYING hotplug callback is serviced by the ETM driver.
  207. */
  208. get_online_cpus();
  209. spin_lock(&drvdata->spinlock);
  210. /*
  211. * Executing etm4_disable_hw on the cpu whose ETM is being disabled
  212. * ensures that register writes occur when cpu is powered.
  213. */
  214. smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
  215. drvdata->enable = false;
  216. spin_unlock(&drvdata->spinlock);
  217. put_online_cpus();
  218. dev_info(drvdata->dev, "ETM tracing disabled\n");
  219. }
/* Trace-source callbacks handed to the coresight core */
static const struct coresight_ops_source etm4_source_ops = {
	.cpu_id = etm4_cpu_id,
	.trace_id = etm4_trace_id,
	.enable = etm4_enable,
	.disable = etm4_disable,
};

/* Top-level coresight ops: this device only acts as a source */
static const struct coresight_ops etm4_cs_ops = {
	.source_ops = &etm4_source_ops,
};
  229. static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  230. {
  231. u8 idx = drvdata->addr_idx;
  232. /*
  233. * TRCACATRn.TYPE bit[1:0]: type of comparison
  234. * the trace unit performs
  235. */
  236. if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  237. if (idx % 2 != 0)
  238. return -EINVAL;
  239. /*
  240. * We are performing instruction address comparison. Set the
  241. * relevant bit of ViewInst Include/Exclude Control register
  242. * for corresponding address comparator pair.
  243. */
  244. if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  245. drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  246. return -EINVAL;
  247. if (exclude == true) {
  248. /*
  249. * Set exclude bit and unset the include bit
  250. * corresponding to comparator pair
  251. */
  252. drvdata->viiectlr |= BIT(idx / 2 + 16);
  253. drvdata->viiectlr &= ~BIT(idx / 2);
  254. } else {
  255. /*
  256. * Set include bit and unset exclude bit
  257. * corresponding to comparator pair
  258. */
  259. drvdata->viiectlr |= BIT(idx / 2);
  260. drvdata->viiectlr &= ~BIT(idx / 2 + 16);
  261. }
  262. }
  263. return 0;
  264. }
  265. static ssize_t nr_pe_cmp_show(struct device *dev,
  266. struct device_attribute *attr,
  267. char *buf)
  268. {
  269. unsigned long val;
  270. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  271. val = drvdata->nr_pe_cmp;
  272. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  273. }
  274. static DEVICE_ATTR_RO(nr_pe_cmp);
  275. static ssize_t nr_addr_cmp_show(struct device *dev,
  276. struct device_attribute *attr,
  277. char *buf)
  278. {
  279. unsigned long val;
  280. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  281. val = drvdata->nr_addr_cmp;
  282. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  283. }
  284. static DEVICE_ATTR_RO(nr_addr_cmp);
  285. static ssize_t nr_cntr_show(struct device *dev,
  286. struct device_attribute *attr,
  287. char *buf)
  288. {
  289. unsigned long val;
  290. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  291. val = drvdata->nr_cntr;
  292. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  293. }
  294. static DEVICE_ATTR_RO(nr_cntr);
  295. static ssize_t nr_ext_inp_show(struct device *dev,
  296. struct device_attribute *attr,
  297. char *buf)
  298. {
  299. unsigned long val;
  300. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  301. val = drvdata->nr_ext_inp;
  302. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  303. }
  304. static DEVICE_ATTR_RO(nr_ext_inp);
  305. static ssize_t numcidc_show(struct device *dev,
  306. struct device_attribute *attr,
  307. char *buf)
  308. {
  309. unsigned long val;
  310. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  311. val = drvdata->numcidc;
  312. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  313. }
  314. static DEVICE_ATTR_RO(numcidc);
  315. static ssize_t numvmidc_show(struct device *dev,
  316. struct device_attribute *attr,
  317. char *buf)
  318. {
  319. unsigned long val;
  320. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  321. val = drvdata->numvmidc;
  322. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  323. }
  324. static DEVICE_ATTR_RO(numvmidc);
  325. static ssize_t nrseqstate_show(struct device *dev,
  326. struct device_attribute *attr,
  327. char *buf)
  328. {
  329. unsigned long val;
  330. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  331. val = drvdata->nrseqstate;
  332. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  333. }
  334. static DEVICE_ATTR_RO(nrseqstate);
  335. static ssize_t nr_resource_show(struct device *dev,
  336. struct device_attribute *attr,
  337. char *buf)
  338. {
  339. unsigned long val;
  340. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  341. val = drvdata->nr_resource;
  342. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  343. }
  344. static DEVICE_ATTR_RO(nr_resource);
  345. static ssize_t nr_ss_cmp_show(struct device *dev,
  346. struct device_attribute *attr,
  347. char *buf)
  348. {
  349. unsigned long val;
  350. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  351. val = drvdata->nr_ss_cmp;
  352. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  353. }
  354. static DEVICE_ATTR_RO(nr_ss_cmp);
  355. static ssize_t reset_store(struct device *dev,
  356. struct device_attribute *attr,
  357. const char *buf, size_t size)
  358. {
  359. int i;
  360. unsigned long val;
  361. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  362. if (kstrtoul(buf, 16, &val))
  363. return -EINVAL;
  364. spin_lock(&drvdata->spinlock);
  365. if (val)
  366. drvdata->mode = 0x0;
  367. /* Disable data tracing: do not trace load and store data transfers */
  368. drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
  369. drvdata->cfg &= ~(BIT(1) | BIT(2));
  370. /* Disable data value and data address tracing */
  371. drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
  372. ETM_MODE_DATA_TRACE_VAL);
  373. drvdata->cfg &= ~(BIT(16) | BIT(17));
  374. /* Disable all events tracing */
  375. drvdata->eventctrl0 = 0x0;
  376. drvdata->eventctrl1 = 0x0;
  377. /* Disable timestamp event */
  378. drvdata->ts_ctrl = 0x0;
  379. /* Disable stalling */
  380. drvdata->stall_ctrl = 0x0;
  381. /* Reset trace synchronization period to 2^8 = 256 bytes*/
  382. if (drvdata->syncpr == false)
  383. drvdata->syncfreq = 0x8;
  384. /*
  385. * Enable ViewInst to trace everything with start-stop logic in
  386. * started state. ARM recommends start-stop logic is set before
  387. * each trace run.
  388. */
  389. drvdata->vinst_ctrl |= BIT(0);
  390. if (drvdata->nr_addr_cmp == true) {
  391. drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
  392. /* SSSTATUS, bit[9] */
  393. drvdata->vinst_ctrl |= BIT(9);
  394. }
  395. /* No address range filtering for ViewInst */
  396. drvdata->viiectlr = 0x0;
  397. /* No start-stop filtering for ViewInst */
  398. drvdata->vissctlr = 0x0;
  399. /* Disable seq events */
  400. for (i = 0; i < drvdata->nrseqstate-1; i++)
  401. drvdata->seq_ctrl[i] = 0x0;
  402. drvdata->seq_rst = 0x0;
  403. drvdata->seq_state = 0x0;
  404. /* Disable external input events */
  405. drvdata->ext_inp = 0x0;
  406. drvdata->cntr_idx = 0x0;
  407. for (i = 0; i < drvdata->nr_cntr; i++) {
  408. drvdata->cntrldvr[i] = 0x0;
  409. drvdata->cntr_ctrl[i] = 0x0;
  410. drvdata->cntr_val[i] = 0x0;
  411. }
  412. /* Resource selector pair 0 is always implemented and reserved */
  413. drvdata->res_idx = 0x2;
  414. for (i = 2; i < drvdata->nr_resource * 2; i++)
  415. drvdata->res_ctrl[i] = 0x0;
  416. for (i = 0; i < drvdata->nr_ss_cmp; i++) {
  417. drvdata->ss_ctrl[i] = 0x0;
  418. drvdata->ss_pe_cmp[i] = 0x0;
  419. }
  420. drvdata->addr_idx = 0x0;
  421. for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
  422. drvdata->addr_val[i] = 0x0;
  423. drvdata->addr_acc[i] = 0x0;
  424. drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
  425. }
  426. drvdata->ctxid_idx = 0x0;
  427. for (i = 0; i < drvdata->numcidc; i++) {
  428. drvdata->ctxid_pid[i] = 0x0;
  429. drvdata->ctxid_vpid[i] = 0x0;
  430. }
  431. drvdata->ctxid_mask0 = 0x0;
  432. drvdata->ctxid_mask1 = 0x0;
  433. drvdata->vmid_idx = 0x0;
  434. for (i = 0; i < drvdata->numvmidc; i++)
  435. drvdata->vmid_val[i] = 0x0;
  436. drvdata->vmid_mask0 = 0x0;
  437. drvdata->vmid_mask1 = 0x0;
  438. drvdata->trcid = drvdata->cpu + 1;
  439. spin_unlock(&drvdata->spinlock);
  440. return size;
  441. }
  442. static DEVICE_ATTR_WO(reset);
  443. static ssize_t mode_show(struct device *dev,
  444. struct device_attribute *attr,
  445. char *buf)
  446. {
  447. unsigned long val;
  448. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  449. val = drvdata->mode;
  450. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  451. }
/*
 * mode_store - sysfs write handler mapping the user-supplied mode flags
 * onto the cached TRCCONFIGR/TRCEVENTCTL1R/TRCSTALLCTLR/TRCVICTLR values.
 *
 * Flags not in ETMv4_MODE_ALL are discarded.  Each feature bit is only
 * set when the hardware advertises support via the corresponding
 * drvdata capability field probed earlier (trcbb, trccci, ctxid_size,
 * q_support, ...).  The hardware itself is untouched here.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETMv4_MODE_ALL;
	/*
	 * NOTE(review): the -EINVAL return of etm4_set_mode_exclude() is
	 * ignored here — confirm that's intentional (store still succeeds
	 * when the current comparator isn't a valid range pair).
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		drvdata->cfg &= ~(BIT(1) | BIT(2));
		if (drvdata->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			drvdata->cfg |= BIT(1);
		if (drvdata->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			drvdata->cfg |= BIT(2);
		if (drvdata->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			drvdata->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		drvdata->cfg |= BIT(3);
	else
		drvdata->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		drvdata->cfg |= BIT(4);
	else
		drvdata->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		drvdata->cfg |= BIT(6);
	else
		drvdata->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier tracing bit */
	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		drvdata->cfg |= BIT(7);
	else
		drvdata->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(drvdata->mode);
	if (drvdata->trccond == true) {
		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		drvdata->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		drvdata->cfg |= BIT(11);
	else
		drvdata->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		drvdata->cfg |= BIT(12);
	else
		drvdata->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(drvdata->mode);
	/* start by clearing QE bits */
	drvdata->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		drvdata->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		drvdata->cfg |= BIT(14);

	/* bit[11] of TRCEVENTCTL1R, AMBA Trace Bus (ATB) trigger enable bit */
	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		drvdata->eventctrl1 |= BIT(11);
	else
		drvdata->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		drvdata->eventctrl1 |= BIT(12);
	else
		drvdata->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (drvdata->mode & ETM_MODE_ISTALL_EN)
		drvdata->stall_ctrl |= BIT(8);
	else
		drvdata->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (drvdata->mode & ETM_MODE_INSTPRIO)
		drvdata->stall_ctrl |= BIT(10);
	else
		drvdata->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		drvdata->stall_ctrl |= BIT(13);
	else
		drvdata->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
		drvdata->vinst_ctrl |= BIT(9);
	else
		drvdata->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (drvdata->mode & ETM_MODE_TRACE_RESET)
		drvdata->vinst_ctrl |= BIT(10);
	else
		drvdata->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		drvdata->vinst_ctrl |= BIT(11);
	else
		drvdata->vinst_ctrl &= ~BIT(11);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);
  580. static ssize_t pe_show(struct device *dev,
  581. struct device_attribute *attr,
  582. char *buf)
  583. {
  584. unsigned long val;
  585. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  586. val = drvdata->pe_sel;
  587. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  588. }
  589. static ssize_t pe_store(struct device *dev,
  590. struct device_attribute *attr,
  591. const char *buf, size_t size)
  592. {
  593. unsigned long val;
  594. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  595. if (kstrtoul(buf, 16, &val))
  596. return -EINVAL;
  597. spin_lock(&drvdata->spinlock);
  598. if (val > drvdata->nr_pe) {
  599. spin_unlock(&drvdata->spinlock);
  600. return -EINVAL;
  601. }
  602. drvdata->pe_sel = val;
  603. spin_unlock(&drvdata->spinlock);
  604. return size;
  605. }
  606. static DEVICE_ATTR_RW(pe);
  607. static ssize_t event_show(struct device *dev,
  608. struct device_attribute *attr,
  609. char *buf)
  610. {
  611. unsigned long val;
  612. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  613. val = drvdata->eventctrl0;
  614. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  615. }
  616. static ssize_t event_store(struct device *dev,
  617. struct device_attribute *attr,
  618. const char *buf, size_t size)
  619. {
  620. unsigned long val;
  621. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  622. if (kstrtoul(buf, 16, &val))
  623. return -EINVAL;
  624. spin_lock(&drvdata->spinlock);
  625. switch (drvdata->nr_event) {
  626. case 0x0:
  627. /* EVENT0, bits[7:0] */
  628. drvdata->eventctrl0 = val & 0xFF;
  629. break;
  630. case 0x1:
  631. /* EVENT1, bits[15:8] */
  632. drvdata->eventctrl0 = val & 0xFFFF;
  633. break;
  634. case 0x2:
  635. /* EVENT2, bits[23:16] */
  636. drvdata->eventctrl0 = val & 0xFFFFFF;
  637. break;
  638. case 0x3:
  639. /* EVENT3, bits[31:24] */
  640. drvdata->eventctrl0 = val;
  641. break;
  642. default:
  643. break;
  644. }
  645. spin_unlock(&drvdata->spinlock);
  646. return size;
  647. }
  648. static DEVICE_ATTR_RW(event);
  649. static ssize_t event_instren_show(struct device *dev,
  650. struct device_attribute *attr,
  651. char *buf)
  652. {
  653. unsigned long val;
  654. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  655. val = BMVAL(drvdata->eventctrl1, 0, 3);
  656. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  657. }
  658. static ssize_t event_instren_store(struct device *dev,
  659. struct device_attribute *attr,
  660. const char *buf, size_t size)
  661. {
  662. unsigned long val;
  663. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  664. if (kstrtoul(buf, 16, &val))
  665. return -EINVAL;
  666. spin_lock(&drvdata->spinlock);
  667. /* start by clearing all instruction event enable bits */
  668. drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
  669. switch (drvdata->nr_event) {
  670. case 0x0:
  671. /* generate Event element for event 1 */
  672. drvdata->eventctrl1 |= val & BIT(1);
  673. break;
  674. case 0x1:
  675. /* generate Event element for event 1 and 2 */
  676. drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
  677. break;
  678. case 0x2:
  679. /* generate Event element for event 1, 2 and 3 */
  680. drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
  681. break;
  682. case 0x3:
  683. /* generate Event element for all 4 events */
  684. drvdata->eventctrl1 |= val & 0xF;
  685. break;
  686. default:
  687. break;
  688. }
  689. spin_unlock(&drvdata->spinlock);
  690. return size;
  691. }
  692. static DEVICE_ATTR_RW(event_instren);
  693. static ssize_t event_ts_show(struct device *dev,
  694. struct device_attribute *attr,
  695. char *buf)
  696. {
  697. unsigned long val;
  698. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  699. val = drvdata->ts_ctrl;
  700. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  701. }
  702. static ssize_t event_ts_store(struct device *dev,
  703. struct device_attribute *attr,
  704. const char *buf, size_t size)
  705. {
  706. unsigned long val;
  707. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  708. if (kstrtoul(buf, 16, &val))
  709. return -EINVAL;
  710. if (!drvdata->ts_size)
  711. return -EINVAL;
  712. drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
  713. return size;
  714. }
  715. static DEVICE_ATTR_RW(event_ts);
  716. static ssize_t syncfreq_show(struct device *dev,
  717. struct device_attribute *attr,
  718. char *buf)
  719. {
  720. unsigned long val;
  721. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  722. val = drvdata->syncfreq;
  723. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  724. }
  725. static ssize_t syncfreq_store(struct device *dev,
  726. struct device_attribute *attr,
  727. const char *buf, size_t size)
  728. {
  729. unsigned long val;
  730. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  731. if (kstrtoul(buf, 16, &val))
  732. return -EINVAL;
  733. if (drvdata->syncpr == true)
  734. return -EINVAL;
  735. drvdata->syncfreq = val & ETMv4_SYNC_MASK;
  736. return size;
  737. }
  738. static DEVICE_ATTR_RW(syncfreq);
  739. static ssize_t cyc_threshold_show(struct device *dev,
  740. struct device_attribute *attr,
  741. char *buf)
  742. {
  743. unsigned long val;
  744. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  745. val = drvdata->ccctlr;
  746. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  747. }
  748. static ssize_t cyc_threshold_store(struct device *dev,
  749. struct device_attribute *attr,
  750. const char *buf, size_t size)
  751. {
  752. unsigned long val;
  753. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  754. if (kstrtoul(buf, 16, &val))
  755. return -EINVAL;
  756. if (val < drvdata->ccitmin)
  757. return -EINVAL;
  758. drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
  759. return size;
  760. }
  761. static DEVICE_ATTR_RW(cyc_threshold);
  762. static ssize_t bb_ctrl_show(struct device *dev,
  763. struct device_attribute *attr,
  764. char *buf)
  765. {
  766. unsigned long val;
  767. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  768. val = drvdata->bb_ctrl;
  769. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  770. }
  771. static ssize_t bb_ctrl_store(struct device *dev,
  772. struct device_attribute *attr,
  773. const char *buf, size_t size)
  774. {
  775. unsigned long val;
  776. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  777. if (kstrtoul(buf, 16, &val))
  778. return -EINVAL;
  779. if (drvdata->trcbb == false)
  780. return -EINVAL;
  781. if (!drvdata->nr_addr_cmp)
  782. return -EINVAL;
  783. /*
  784. * Bit[7:0] selects which address range comparator is used for
  785. * branch broadcast control.
  786. */
  787. if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
  788. return -EINVAL;
  789. drvdata->bb_ctrl = val;
  790. return size;
  791. }
  792. static DEVICE_ATTR_RW(bb_ctrl);
  793. static ssize_t event_vinst_show(struct device *dev,
  794. struct device_attribute *attr,
  795. char *buf)
  796. {
  797. unsigned long val;
  798. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  799. val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
  800. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  801. }
  802. static ssize_t event_vinst_store(struct device *dev,
  803. struct device_attribute *attr,
  804. const char *buf, size_t size)
  805. {
  806. unsigned long val;
  807. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  808. if (kstrtoul(buf, 16, &val))
  809. return -EINVAL;
  810. spin_lock(&drvdata->spinlock);
  811. val &= ETMv4_EVENT_MASK;
  812. drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
  813. drvdata->vinst_ctrl |= val;
  814. spin_unlock(&drvdata->spinlock);
  815. return size;
  816. }
  817. static DEVICE_ATTR_RW(event_vinst);
  818. static ssize_t s_exlevel_vinst_show(struct device *dev,
  819. struct device_attribute *attr,
  820. char *buf)
  821. {
  822. unsigned long val;
  823. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  824. val = BMVAL(drvdata->vinst_ctrl, 16, 19);
  825. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  826. }
  827. static ssize_t s_exlevel_vinst_store(struct device *dev,
  828. struct device_attribute *attr,
  829. const char *buf, size_t size)
  830. {
  831. unsigned long val;
  832. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  833. if (kstrtoul(buf, 16, &val))
  834. return -EINVAL;
  835. spin_lock(&drvdata->spinlock);
  836. /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
  837. drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
  838. /* enable instruction tracing for corresponding exception level */
  839. val &= drvdata->s_ex_level;
  840. drvdata->vinst_ctrl |= (val << 16);
  841. spin_unlock(&drvdata->spinlock);
  842. return size;
  843. }
  844. static DEVICE_ATTR_RW(s_exlevel_vinst);
  845. static ssize_t ns_exlevel_vinst_show(struct device *dev,
  846. struct device_attribute *attr,
  847. char *buf)
  848. {
  849. unsigned long val;
  850. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  851. /* EXLEVEL_NS, bits[23:20] */
  852. val = BMVAL(drvdata->vinst_ctrl, 20, 23);
  853. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  854. }
  855. static ssize_t ns_exlevel_vinst_store(struct device *dev,
  856. struct device_attribute *attr,
  857. const char *buf, size_t size)
  858. {
  859. unsigned long val;
  860. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  861. if (kstrtoul(buf, 16, &val))
  862. return -EINVAL;
  863. spin_lock(&drvdata->spinlock);
  864. /* clear EXLEVEL_NS bits (bit[23] is never implemented */
  865. drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
  866. /* enable instruction tracing for corresponding exception level */
  867. val &= drvdata->ns_ex_level;
  868. drvdata->vinst_ctrl |= (val << 20);
  869. spin_unlock(&drvdata->spinlock);
  870. return size;
  871. }
  872. static DEVICE_ATTR_RW(ns_exlevel_vinst);
  873. static ssize_t addr_idx_show(struct device *dev,
  874. struct device_attribute *attr,
  875. char *buf)
  876. {
  877. unsigned long val;
  878. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  879. val = drvdata->addr_idx;
  880. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  881. }
  882. static ssize_t addr_idx_store(struct device *dev,
  883. struct device_attribute *attr,
  884. const char *buf, size_t size)
  885. {
  886. unsigned long val;
  887. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  888. if (kstrtoul(buf, 16, &val))
  889. return -EINVAL;
  890. if (val >= drvdata->nr_addr_cmp * 2)
  891. return -EINVAL;
  892. /*
  893. * Use spinlock to ensure index doesn't change while it gets
  894. * dereferenced multiple times within a spinlock block elsewhere.
  895. */
  896. spin_lock(&drvdata->spinlock);
  897. drvdata->addr_idx = val;
  898. spin_unlock(&drvdata->spinlock);
  899. return size;
  900. }
  901. static DEVICE_ATTR_RW(addr_idx);
  902. static ssize_t addr_instdatatype_show(struct device *dev,
  903. struct device_attribute *attr,
  904. char *buf)
  905. {
  906. ssize_t len;
  907. u8 val, idx;
  908. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  909. spin_lock(&drvdata->spinlock);
  910. idx = drvdata->addr_idx;
  911. val = BMVAL(drvdata->addr_acc[idx], 0, 1);
  912. len = scnprintf(buf, PAGE_SIZE, "%s\n",
  913. val == ETM_INSTR_ADDR ? "instr" :
  914. (val == ETM_DATA_LOAD_ADDR ? "data_load" :
  915. (val == ETM_DATA_STORE_ADDR ? "data_store" :
  916. "data_load_store")));
  917. spin_unlock(&drvdata->spinlock);
  918. return len;
  919. }
  920. static ssize_t addr_instdatatype_store(struct device *dev,
  921. struct device_attribute *attr,
  922. const char *buf, size_t size)
  923. {
  924. u8 idx;
  925. char str[20] = "";
  926. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  927. if (strlen(buf) >= 20)
  928. return -EINVAL;
  929. if (sscanf(buf, "%s", str) != 1)
  930. return -EINVAL;
  931. spin_lock(&drvdata->spinlock);
  932. idx = drvdata->addr_idx;
  933. if (!strcmp(str, "instr"))
  934. /* TYPE, bits[1:0] */
  935. drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
  936. spin_unlock(&drvdata->spinlock);
  937. return size;
  938. }
  939. static DEVICE_ATTR_RW(addr_instdatatype);
  940. static ssize_t addr_single_show(struct device *dev,
  941. struct device_attribute *attr,
  942. char *buf)
  943. {
  944. u8 idx;
  945. unsigned long val;
  946. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  947. idx = drvdata->addr_idx;
  948. spin_lock(&drvdata->spinlock);
  949. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  950. drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  951. spin_unlock(&drvdata->spinlock);
  952. return -EPERM;
  953. }
  954. val = (unsigned long)drvdata->addr_val[idx];
  955. spin_unlock(&drvdata->spinlock);
  956. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  957. }
  958. static ssize_t addr_single_store(struct device *dev,
  959. struct device_attribute *attr,
  960. const char *buf, size_t size)
  961. {
  962. u8 idx;
  963. unsigned long val;
  964. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  965. if (kstrtoul(buf, 16, &val))
  966. return -EINVAL;
  967. spin_lock(&drvdata->spinlock);
  968. idx = drvdata->addr_idx;
  969. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  970. drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  971. spin_unlock(&drvdata->spinlock);
  972. return -EPERM;
  973. }
  974. drvdata->addr_val[idx] = (u64)val;
  975. drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
  976. spin_unlock(&drvdata->spinlock);
  977. return size;
  978. }
  979. static DEVICE_ATTR_RW(addr_single);
  980. static ssize_t addr_range_show(struct device *dev,
  981. struct device_attribute *attr,
  982. char *buf)
  983. {
  984. u8 idx;
  985. unsigned long val1, val2;
  986. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  987. spin_lock(&drvdata->spinlock);
  988. idx = drvdata->addr_idx;
  989. if (idx % 2 != 0) {
  990. spin_unlock(&drvdata->spinlock);
  991. return -EPERM;
  992. }
  993. if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
  994. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
  995. (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
  996. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
  997. spin_unlock(&drvdata->spinlock);
  998. return -EPERM;
  999. }
  1000. val1 = (unsigned long)drvdata->addr_val[idx];
  1001. val2 = (unsigned long)drvdata->addr_val[idx + 1];
  1002. spin_unlock(&drvdata->spinlock);
  1003. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1004. }
/*
 * sysfs write handler: program the selected even/odd comparator pair as
 * an address range.  Input is "<start> <end>" in hex; the currently
 * selected comparator (addr_idx) must be the even half of a pair that is
 * unused or already range-typed.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* a range always occupies an even/odd pair, starting on the even index */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	/* both halves of the pair must be free, or both already a range */
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	drvdata->addr_val[idx] = (u64)val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = (u64)val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
  1046. static ssize_t addr_start_show(struct device *dev,
  1047. struct device_attribute *attr,
  1048. char *buf)
  1049. {
  1050. u8 idx;
  1051. unsigned long val;
  1052. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1053. spin_lock(&drvdata->spinlock);
  1054. idx = drvdata->addr_idx;
  1055. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1056. drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  1057. spin_unlock(&drvdata->spinlock);
  1058. return -EPERM;
  1059. }
  1060. val = (unsigned long)drvdata->addr_val[idx];
  1061. spin_unlock(&drvdata->spinlock);
  1062. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1063. }
/*
 * sysfs write handler: program the selected comparator as a trace
 * "start" address for the ViewInst start/stop logic, and turn that
 * logic on.
 */
static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* no address comparators implemented on this trace unit */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	/* the comparator must be free or already used as a start address */
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* flag comparator 'idx' as a start resource */
	drvdata->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
  1093. static ssize_t addr_stop_show(struct device *dev,
  1094. struct device_attribute *attr,
  1095. char *buf)
  1096. {
  1097. u8 idx;
  1098. unsigned long val;
  1099. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1100. spin_lock(&drvdata->spinlock);
  1101. idx = drvdata->addr_idx;
  1102. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1103. drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  1104. spin_unlock(&drvdata->spinlock);
  1105. return -EPERM;
  1106. }
  1107. val = (unsigned long)drvdata->addr_val[idx];
  1108. spin_unlock(&drvdata->spinlock);
  1109. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1110. }
/*
 * sysfs write handler: program the selected comparator as a trace
 * "stop" address for the ViewInst start/stop logic, and turn that
 * logic on.
 */
static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* no address comparators implemented on this trace unit */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	/* the comparator must be free or already used as a stop address */
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	/* stop resources live in the upper half, hence the '+ 16' shift */
	drvdata->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
  1140. static ssize_t addr_ctxtype_show(struct device *dev,
  1141. struct device_attribute *attr,
  1142. char *buf)
  1143. {
  1144. ssize_t len;
  1145. u8 idx, val;
  1146. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1147. spin_lock(&drvdata->spinlock);
  1148. idx = drvdata->addr_idx;
  1149. /* CONTEXTTYPE, bits[3:2] */
  1150. val = BMVAL(drvdata->addr_acc[idx], 2, 3);
  1151. len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
  1152. (val == ETM_CTX_CTXID ? "ctxid" :
  1153. (val == ETM_CTX_VMID ? "vmid" : "all")));
  1154. spin_unlock(&drvdata->spinlock);
  1155. return len;
  1156. }
/*
 * sysfs write handler: set the context-comparison type of the selected
 * address comparator.  Accepts "none", "ctxid", "vmid" or "all"; the
 * ctxid/vmid options are applied only when the implementation provides
 * the corresponding comparators (numcidc / numvmidc).
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	/* keyword plus terminator must fit in str[] */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			drvdata->addr_acc[idx] |= BIT(2);
			drvdata->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			drvdata->addr_acc[idx] &= ~BIT(2);
			drvdata->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			drvdata->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			drvdata->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
  1199. static ssize_t addr_context_show(struct device *dev,
  1200. struct device_attribute *attr,
  1201. char *buf)
  1202. {
  1203. u8 idx;
  1204. unsigned long val;
  1205. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1206. spin_lock(&drvdata->spinlock);
  1207. idx = drvdata->addr_idx;
  1208. /* context ID comparator bits[6:4] */
  1209. val = BMVAL(drvdata->addr_acc[idx], 4, 6);
  1210. spin_unlock(&drvdata->spinlock);
  1211. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1212. }
  1213. static ssize_t addr_context_store(struct device *dev,
  1214. struct device_attribute *attr,
  1215. const char *buf, size_t size)
  1216. {
  1217. u8 idx;
  1218. unsigned long val;
  1219. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1220. if (kstrtoul(buf, 16, &val))
  1221. return -EINVAL;
  1222. if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
  1223. return -EINVAL;
  1224. if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
  1225. drvdata->numcidc : drvdata->numvmidc))
  1226. return -EINVAL;
  1227. spin_lock(&drvdata->spinlock);
  1228. idx = drvdata->addr_idx;
  1229. /* clear context ID comparator bits[6:4] */
  1230. drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
  1231. drvdata->addr_acc[idx] |= (val << 4);
  1232. spin_unlock(&drvdata->spinlock);
  1233. return size;
  1234. }
  1235. static DEVICE_ATTR_RW(addr_context);
  1236. static ssize_t seq_idx_show(struct device *dev,
  1237. struct device_attribute *attr,
  1238. char *buf)
  1239. {
  1240. unsigned long val;
  1241. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1242. val = drvdata->seq_idx;
  1243. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1244. }
  1245. static ssize_t seq_idx_store(struct device *dev,
  1246. struct device_attribute *attr,
  1247. const char *buf, size_t size)
  1248. {
  1249. unsigned long val;
  1250. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1251. if (kstrtoul(buf, 16, &val))
  1252. return -EINVAL;
  1253. if (val >= drvdata->nrseqstate - 1)
  1254. return -EINVAL;
  1255. /*
  1256. * Use spinlock to ensure index doesn't change while it gets
  1257. * dereferenced multiple times within a spinlock block elsewhere.
  1258. */
  1259. spin_lock(&drvdata->spinlock);
  1260. drvdata->seq_idx = val;
  1261. spin_unlock(&drvdata->spinlock);
  1262. return size;
  1263. }
  1264. static DEVICE_ATTR_RW(seq_idx);
  1265. static ssize_t seq_state_show(struct device *dev,
  1266. struct device_attribute *attr,
  1267. char *buf)
  1268. {
  1269. unsigned long val;
  1270. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1271. val = drvdata->seq_state;
  1272. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1273. }
  1274. static ssize_t seq_state_store(struct device *dev,
  1275. struct device_attribute *attr,
  1276. const char *buf, size_t size)
  1277. {
  1278. unsigned long val;
  1279. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1280. if (kstrtoul(buf, 16, &val))
  1281. return -EINVAL;
  1282. if (val >= drvdata->nrseqstate)
  1283. return -EINVAL;
  1284. drvdata->seq_state = val;
  1285. return size;
  1286. }
  1287. static DEVICE_ATTR_RW(seq_state);
  1288. static ssize_t seq_event_show(struct device *dev,
  1289. struct device_attribute *attr,
  1290. char *buf)
  1291. {
  1292. u8 idx;
  1293. unsigned long val;
  1294. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1295. spin_lock(&drvdata->spinlock);
  1296. idx = drvdata->seq_idx;
  1297. val = drvdata->seq_ctrl[idx];
  1298. spin_unlock(&drvdata->spinlock);
  1299. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1300. }
  1301. static ssize_t seq_event_store(struct device *dev,
  1302. struct device_attribute *attr,
  1303. const char *buf, size_t size)
  1304. {
  1305. u8 idx;
  1306. unsigned long val;
  1307. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1308. if (kstrtoul(buf, 16, &val))
  1309. return -EINVAL;
  1310. spin_lock(&drvdata->spinlock);
  1311. idx = drvdata->seq_idx;
  1312. /* RST, bits[7:0] */
  1313. drvdata->seq_ctrl[idx] = val & 0xFF;
  1314. spin_unlock(&drvdata->spinlock);
  1315. return size;
  1316. }
  1317. static DEVICE_ATTR_RW(seq_event);
  1318. static ssize_t seq_reset_event_show(struct device *dev,
  1319. struct device_attribute *attr,
  1320. char *buf)
  1321. {
  1322. unsigned long val;
  1323. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1324. val = drvdata->seq_rst;
  1325. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1326. }
  1327. static ssize_t seq_reset_event_store(struct device *dev,
  1328. struct device_attribute *attr,
  1329. const char *buf, size_t size)
  1330. {
  1331. unsigned long val;
  1332. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1333. if (kstrtoul(buf, 16, &val))
  1334. return -EINVAL;
  1335. if (!(drvdata->nrseqstate))
  1336. return -EINVAL;
  1337. drvdata->seq_rst = val & ETMv4_EVENT_MASK;
  1338. return size;
  1339. }
  1340. static DEVICE_ATTR_RW(seq_reset_event);
  1341. static ssize_t cntr_idx_show(struct device *dev,
  1342. struct device_attribute *attr,
  1343. char *buf)
  1344. {
  1345. unsigned long val;
  1346. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1347. val = drvdata->cntr_idx;
  1348. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1349. }
  1350. static ssize_t cntr_idx_store(struct device *dev,
  1351. struct device_attribute *attr,
  1352. const char *buf, size_t size)
  1353. {
  1354. unsigned long val;
  1355. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1356. if (kstrtoul(buf, 16, &val))
  1357. return -EINVAL;
  1358. if (val >= drvdata->nr_cntr)
  1359. return -EINVAL;
  1360. /*
  1361. * Use spinlock to ensure index doesn't change while it gets
  1362. * dereferenced multiple times within a spinlock block elsewhere.
  1363. */
  1364. spin_lock(&drvdata->spinlock);
  1365. drvdata->cntr_idx = val;
  1366. spin_unlock(&drvdata->spinlock);
  1367. return size;
  1368. }
  1369. static DEVICE_ATTR_RW(cntr_idx);
  1370. static ssize_t cntrldvr_show(struct device *dev,
  1371. struct device_attribute *attr,
  1372. char *buf)
  1373. {
  1374. u8 idx;
  1375. unsigned long val;
  1376. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1377. spin_lock(&drvdata->spinlock);
  1378. idx = drvdata->cntr_idx;
  1379. val = drvdata->cntrldvr[idx];
  1380. spin_unlock(&drvdata->spinlock);
  1381. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1382. }
  1383. static ssize_t cntrldvr_store(struct device *dev,
  1384. struct device_attribute *attr,
  1385. const char *buf, size_t size)
  1386. {
  1387. u8 idx;
  1388. unsigned long val;
  1389. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1390. if (kstrtoul(buf, 16, &val))
  1391. return -EINVAL;
  1392. if (val > ETM_CNTR_MAX_VAL)
  1393. return -EINVAL;
  1394. spin_lock(&drvdata->spinlock);
  1395. idx = drvdata->cntr_idx;
  1396. drvdata->cntrldvr[idx] = val;
  1397. spin_unlock(&drvdata->spinlock);
  1398. return size;
  1399. }
  1400. static DEVICE_ATTR_RW(cntrldvr);
  1401. static ssize_t cntr_val_show(struct device *dev,
  1402. struct device_attribute *attr,
  1403. char *buf)
  1404. {
  1405. u8 idx;
  1406. unsigned long val;
  1407. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1408. spin_lock(&drvdata->spinlock);
  1409. idx = drvdata->cntr_idx;
  1410. val = drvdata->cntr_val[idx];
  1411. spin_unlock(&drvdata->spinlock);
  1412. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1413. }
  1414. static ssize_t cntr_val_store(struct device *dev,
  1415. struct device_attribute *attr,
  1416. const char *buf, size_t size)
  1417. {
  1418. u8 idx;
  1419. unsigned long val;
  1420. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1421. if (kstrtoul(buf, 16, &val))
  1422. return -EINVAL;
  1423. if (val > ETM_CNTR_MAX_VAL)
  1424. return -EINVAL;
  1425. spin_lock(&drvdata->spinlock);
  1426. idx = drvdata->cntr_idx;
  1427. drvdata->cntr_val[idx] = val;
  1428. spin_unlock(&drvdata->spinlock);
  1429. return size;
  1430. }
  1431. static DEVICE_ATTR_RW(cntr_val);
  1432. static ssize_t cntr_ctrl_show(struct device *dev,
  1433. struct device_attribute *attr,
  1434. char *buf)
  1435. {
  1436. u8 idx;
  1437. unsigned long val;
  1438. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1439. spin_lock(&drvdata->spinlock);
  1440. idx = drvdata->cntr_idx;
  1441. val = drvdata->cntr_ctrl[idx];
  1442. spin_unlock(&drvdata->spinlock);
  1443. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1444. }
  1445. static ssize_t cntr_ctrl_store(struct device *dev,
  1446. struct device_attribute *attr,
  1447. const char *buf, size_t size)
  1448. {
  1449. u8 idx;
  1450. unsigned long val;
  1451. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1452. if (kstrtoul(buf, 16, &val))
  1453. return -EINVAL;
  1454. spin_lock(&drvdata->spinlock);
  1455. idx = drvdata->cntr_idx;
  1456. drvdata->cntr_ctrl[idx] = val;
  1457. spin_unlock(&drvdata->spinlock);
  1458. return size;
  1459. }
  1460. static DEVICE_ATTR_RW(cntr_ctrl);
  1461. static ssize_t res_idx_show(struct device *dev,
  1462. struct device_attribute *attr,
  1463. char *buf)
  1464. {
  1465. unsigned long val;
  1466. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1467. val = drvdata->res_idx;
  1468. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1469. }
/*
 * Select which resource selector the res_ctrl file operates on.
 * Accepts a hex index in [2, nr_resource * 2).
 */
static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if (val < 2 || val >= drvdata->nr_resource * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
/* Show the cached control value of the selected resource selector. */
static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* lock so the index and the indexed value are read consistently */
	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	val = drvdata->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Set the control value of the resource selector chosen via res_idx.
 * Only the shadow state is updated here.
 */
static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	drvdata->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
  1524. static ssize_t ctxid_idx_show(struct device *dev,
  1525. struct device_attribute *attr,
  1526. char *buf)
  1527. {
  1528. unsigned long val;
  1529. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1530. val = drvdata->ctxid_idx;
  1531. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1532. }
/*
 * Select which context ID comparator the ctxid_pid file operates on.
 * Accepts a hex index below numcidc.
 */
static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
/* Show the cached virtual PID of the selected context ID comparator. */
static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* lock so the index and the indexed value are read consistently */
	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	val = (unsigned long)drvdata->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
/*
 * Set the PID value of the selected context ID comparator.  Both the
 * user-supplied (virtual) pid and its translation via
 * coresight_vpid_to_pid() are cached.
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	/* lock so ctxid_idx cannot change while both values are cached */
	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	drvdata->ctxid_pid[idx] = (u64)pid;
	drvdata->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
  1591. static ssize_t ctxid_masks_show(struct device *dev,
  1592. struct device_attribute *attr,
  1593. char *buf)
  1594. {
  1595. unsigned long val1, val2;
  1596. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1597. spin_lock(&drvdata->spinlock);
  1598. val1 = drvdata->ctxid_mask0;
  1599. val2 = drvdata->ctxid_mask1;
  1600. spin_unlock(&drvdata->spinlock);
  1601. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1602. }
  1603. static ssize_t ctxid_masks_store(struct device *dev,
  1604. struct device_attribute *attr,
  1605. const char *buf, size_t size)
  1606. {
  1607. u8 i, j, maskbyte;
  1608. unsigned long val1, val2, mask;
  1609. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1610. /*
  1611. * only implemented when ctxid tracing is enabled, i.e. at least one
  1612. * ctxid comparator is implemented and ctxid is greater than 0 bits
  1613. * in length
  1614. */
  1615. if (!drvdata->ctxid_size || !drvdata->numcidc)
  1616. return -EINVAL;
  1617. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1618. return -EINVAL;
  1619. spin_lock(&drvdata->spinlock);
  1620. /*
  1621. * each byte[0..3] controls mask value applied to ctxid
  1622. * comparator[0..3]
  1623. */
  1624. switch (drvdata->numcidc) {
  1625. case 0x1:
  1626. /* COMP0, bits[7:0] */
  1627. drvdata->ctxid_mask0 = val1 & 0xFF;
  1628. break;
  1629. case 0x2:
  1630. /* COMP1, bits[15:8] */
  1631. drvdata->ctxid_mask0 = val1 & 0xFFFF;
  1632. break;
  1633. case 0x3:
  1634. /* COMP2, bits[23:16] */
  1635. drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
  1636. break;
  1637. case 0x4:
  1638. /* COMP3, bits[31:24] */
  1639. drvdata->ctxid_mask0 = val1;
  1640. break;
  1641. case 0x5:
  1642. /* COMP4, bits[7:0] */
  1643. drvdata->ctxid_mask0 = val1;
  1644. drvdata->ctxid_mask1 = val2 & 0xFF;
  1645. break;
  1646. case 0x6:
  1647. /* COMP5, bits[15:8] */
  1648. drvdata->ctxid_mask0 = val1;
  1649. drvdata->ctxid_mask1 = val2 & 0xFFFF;
  1650. break;
  1651. case 0x7:
  1652. /* COMP6, bits[23:16] */
  1653. drvdata->ctxid_mask0 = val1;
  1654. drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
  1655. break;
  1656. case 0x8:
  1657. /* COMP7, bits[31:24] */
  1658. drvdata->ctxid_mask0 = val1;
  1659. drvdata->ctxid_mask1 = val2;
  1660. break;
  1661. default:
  1662. break;
  1663. }
  1664. /*
  1665. * If software sets a mask bit to 1, it must program relevant byte
  1666. * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
  1667. * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
  1668. * of ctxid comparator0 value (corresponding to byte 0) register.
  1669. */
  1670. mask = drvdata->ctxid_mask0;
  1671. for (i = 0; i < drvdata->numcidc; i++) {
  1672. /* mask value of corresponding ctxid comparator */
  1673. maskbyte = mask & ETMv4_EVENT_MASK;
  1674. /*
  1675. * each bit corresponds to a byte of respective ctxid comparator
  1676. * value register
  1677. */
  1678. for (j = 0; j < 8; j++) {
  1679. if (maskbyte & 1)
  1680. drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
  1681. maskbyte >>= 1;
  1682. }
  1683. /* Select the next ctxid comparator mask value */
  1684. if (i == 3)
  1685. /* ctxid comparators[4-7] */
  1686. mask = drvdata->ctxid_mask1;
  1687. else
  1688. mask >>= 0x8;
  1689. }
  1690. spin_unlock(&drvdata->spinlock);
  1691. return size;
  1692. }
  1693. static DEVICE_ATTR_RW(ctxid_masks);
/* Show the index of the VMID comparator the vmid_val file operates on. */
static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
  1703. static ssize_t vmid_idx_store(struct device *dev,
  1704. struct device_attribute *attr,
  1705. const char *buf, size_t size)
  1706. {
  1707. unsigned long val;
  1708. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1709. if (kstrtoul(buf, 16, &val))
  1710. return -EINVAL;
  1711. if (val >= drvdata->numvmidc)
  1712. return -EINVAL;
  1713. /*
  1714. * Use spinlock to ensure index doesn't change while it gets
  1715. * dereferenced multiple times within a spinlock block elsewhere.
  1716. */
  1717. spin_lock(&drvdata->spinlock);
  1718. drvdata->vmid_idx = val;
  1719. spin_unlock(&drvdata->spinlock);
  1720. return size;
  1721. }
  1722. static DEVICE_ATTR_RW(vmid_idx);
  1723. static ssize_t vmid_val_show(struct device *dev,
  1724. struct device_attribute *attr,
  1725. char *buf)
  1726. {
  1727. unsigned long val;
  1728. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1729. val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
  1730. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1731. }
/*
 * Set the value of the VMID comparator selected by vmid_idx.  Input is
 * parsed as hex; only the driver's shadow state is updated here.
 */
static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	/* lock so vmid_idx cannot change while it is being used */
	spin_lock(&drvdata->spinlock);
	drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
  1752. static ssize_t vmid_masks_show(struct device *dev,
  1753. struct device_attribute *attr, char *buf)
  1754. {
  1755. unsigned long val1, val2;
  1756. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1757. spin_lock(&drvdata->spinlock);
  1758. val1 = drvdata->vmid_mask0;
  1759. val2 = drvdata->vmid_mask1;
  1760. spin_unlock(&drvdata->spinlock);
  1761. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1762. }
  1763. static ssize_t vmid_masks_store(struct device *dev,
  1764. struct device_attribute *attr,
  1765. const char *buf, size_t size)
  1766. {
  1767. u8 i, j, maskbyte;
  1768. unsigned long val1, val2, mask;
  1769. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1770. /*
  1771. * only implemented when vmid tracing is enabled, i.e. at least one
  1772. * vmid comparator is implemented and at least 8 bit vmid size
  1773. */
  1774. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1775. return -EINVAL;
  1776. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1777. return -EINVAL;
  1778. spin_lock(&drvdata->spinlock);
  1779. /*
  1780. * each byte[0..3] controls mask value applied to vmid
  1781. * comparator[0..3]
  1782. */
  1783. switch (drvdata->numvmidc) {
  1784. case 0x1:
  1785. /* COMP0, bits[7:0] */
  1786. drvdata->vmid_mask0 = val1 & 0xFF;
  1787. break;
  1788. case 0x2:
  1789. /* COMP1, bits[15:8] */
  1790. drvdata->vmid_mask0 = val1 & 0xFFFF;
  1791. break;
  1792. case 0x3:
  1793. /* COMP2, bits[23:16] */
  1794. drvdata->vmid_mask0 = val1 & 0xFFFFFF;
  1795. break;
  1796. case 0x4:
  1797. /* COMP3, bits[31:24] */
  1798. drvdata->vmid_mask0 = val1;
  1799. break;
  1800. case 0x5:
  1801. /* COMP4, bits[7:0] */
  1802. drvdata->vmid_mask0 = val1;
  1803. drvdata->vmid_mask1 = val2 & 0xFF;
  1804. break;
  1805. case 0x6:
  1806. /* COMP5, bits[15:8] */
  1807. drvdata->vmid_mask0 = val1;
  1808. drvdata->vmid_mask1 = val2 & 0xFFFF;
  1809. break;
  1810. case 0x7:
  1811. /* COMP6, bits[23:16] */
  1812. drvdata->vmid_mask0 = val1;
  1813. drvdata->vmid_mask1 = val2 & 0xFFFFFF;
  1814. break;
  1815. case 0x8:
  1816. /* COMP7, bits[31:24] */
  1817. drvdata->vmid_mask0 = val1;
  1818. drvdata->vmid_mask1 = val2;
  1819. break;
  1820. default:
  1821. break;
  1822. }
  1823. /*
  1824. * If software sets a mask bit to 1, it must program relevant byte
  1825. * of vmid comparator value 0x0, otherwise behavior is unpredictable.
  1826. * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
  1827. * of vmid comparator0 value (corresponding to byte 0) register.
  1828. */
  1829. mask = drvdata->vmid_mask0;
  1830. for (i = 0; i < drvdata->numvmidc; i++) {
  1831. /* mask value of corresponding vmid comparator */
  1832. maskbyte = mask & ETMv4_EVENT_MASK;
  1833. /*
  1834. * each bit corresponds to a byte of respective vmid comparator
  1835. * value register
  1836. */
  1837. for (j = 0; j < 8; j++) {
  1838. if (maskbyte & 1)
  1839. drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
  1840. maskbyte >>= 1;
  1841. }
  1842. /* Select the next vmid comparator mask value */
  1843. if (i == 3)
  1844. /* vmid comparators[4-7] */
  1845. mask = drvdata->vmid_mask1;
  1846. else
  1847. mask >>= 0x8;
  1848. }
  1849. spin_unlock(&drvdata->spinlock);
  1850. return size;
  1851. }
  1852. static DEVICE_ATTR_RW(vmid_masks);
  1853. static ssize_t cpu_show(struct device *dev,
  1854. struct device_attribute *attr, char *buf)
  1855. {
  1856. int val;
  1857. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1858. val = drvdata->cpu;
  1859. return scnprintf(buf, PAGE_SIZE, "%d\n", val);
  1860. }
  1861. static DEVICE_ATTR_RO(cpu);
/* sysfs attributes for configuring and inspecting the trace unit */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
/*
 * Generate a read-only sysfs attribute that dumps the raw 32-bit value
 * of the register at 'offset' from the trace unit's base address.
 */
#define coresight_simple_func(name, offset) \
static ssize_t name##_show(struct device *_dev, \
			   struct device_attribute *attr, char *buf) \
{ \
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
			 readl_relaxed(drvdata->base + offset)); \
} \
static DEVICE_ATTR_RO(name)
/* management registers, exposed read-only under the "mgmt" sysfs group */
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
coresight_simple_func(trcpdsr, TRCPDSR);
coresight_simple_func(trclsr, TRCLSR);
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_simple_func(trcdevid, TRCDEVID);
coresight_simple_func(trcdevtype, TRCDEVTYPE);
coresight_simple_func(trcpidr0, TRCPIDR0);
coresight_simple_func(trcpidr1, TRCPIDR1);
coresight_simple_func(trcpidr2, TRCPIDR2);
coresight_simple_func(trcpidr3, TRCPIDR3);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
/* ID registers, exposed read-only under the "trcidr" sysfs group */
coresight_simple_func(trcidr0, TRCIDR0);
coresight_simple_func(trcidr1, TRCIDR1);
coresight_simple_func(trcidr2, TRCIDR2);
coresight_simple_func(trcidr3, TRCIDR3);
coresight_simple_func(trcidr4, TRCIDR4);
coresight_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_simple_func(trcidr8, TRCIDR8);
coresight_simple_func(trcidr9, TRCIDR9);
coresight_simple_func(trcidr10, TRCIDR10);
coresight_simple_func(trcidr11, TRCIDR11);
coresight_simple_func(trcidr12, TRCIDR12);
coresight_simple_func(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
/* top-level, "mgmt" and "trcidr" sysfs groups registered per device */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

static const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
  1991. static void etm4_init_arch_data(void *info)
  1992. {
  1993. u32 etmidr0;
  1994. u32 etmidr1;
  1995. u32 etmidr2;
  1996. u32 etmidr3;
  1997. u32 etmidr4;
  1998. u32 etmidr5;
  1999. struct etmv4_drvdata *drvdata = info;
  2000. CS_UNLOCK(drvdata->base);
  2001. /* find all capabilities of the tracing unit */
  2002. etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);
  2003. /* INSTP0, bits[2:1] P0 tracing support field */
  2004. if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
  2005. drvdata->instrp0 = true;
  2006. else
  2007. drvdata->instrp0 = false;
  2008. /* TRCBB, bit[5] Branch broadcast tracing support bit */
  2009. if (BMVAL(etmidr0, 5, 5))
  2010. drvdata->trcbb = true;
  2011. else
  2012. drvdata->trcbb = false;
  2013. /* TRCCOND, bit[6] Conditional instruction tracing support bit */
  2014. if (BMVAL(etmidr0, 6, 6))
  2015. drvdata->trccond = true;
  2016. else
  2017. drvdata->trccond = false;
  2018. /* TRCCCI, bit[7] Cycle counting instruction bit */
  2019. if (BMVAL(etmidr0, 7, 7))
  2020. drvdata->trccci = true;
  2021. else
  2022. drvdata->trccci = false;
  2023. /* RETSTACK, bit[9] Return stack bit */
  2024. if (BMVAL(etmidr0, 9, 9))
  2025. drvdata->retstack = true;
  2026. else
  2027. drvdata->retstack = false;
  2028. /* NUMEVENT, bits[11:10] Number of events field */
  2029. drvdata->nr_event = BMVAL(etmidr0, 10, 11);
  2030. /* QSUPP, bits[16:15] Q element support field */
  2031. drvdata->q_support = BMVAL(etmidr0, 15, 16);
  2032. /* TSSIZE, bits[28:24] Global timestamp size field */
  2033. drvdata->ts_size = BMVAL(etmidr0, 24, 28);
  2034. /* base architecture of trace unit */
  2035. etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
  2036. /*
  2037. * TRCARCHMIN, bits[7:4] architecture the minor version number
  2038. * TRCARCHMAJ, bits[11:8] architecture major versin number
  2039. */
  2040. drvdata->arch = BMVAL(etmidr1, 4, 11);
  2041. /* maximum size of resources */
  2042. etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
  2043. /* CIDSIZE, bits[9:5] Indicates the Context ID size */
  2044. drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
  2045. /* VMIDSIZE, bits[14:10] Indicates the VMID size */
  2046. drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
  2047. /* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
  2048. drvdata->ccsize = BMVAL(etmidr2, 25, 28);
  2049. etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
  2050. /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
  2051. drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
  2052. /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
  2053. drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
  2054. /* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
  2055. drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);
  2056. /*
  2057. * TRCERR, bit[24] whether a trace unit can trace a
  2058. * system error exception.
  2059. */
  2060. if (BMVAL(etmidr3, 24, 24))
  2061. drvdata->trc_error = true;
  2062. else
  2063. drvdata->trc_error = false;
  2064. /* SYNCPR, bit[25] implementation has a fixed synchronization period? */
  2065. if (BMVAL(etmidr3, 25, 25))
  2066. drvdata->syncpr = true;
  2067. else
  2068. drvdata->syncpr = false;
  2069. /* STALLCTL, bit[26] is stall control implemented? */
  2070. if (BMVAL(etmidr3, 26, 26))
  2071. drvdata->stallctl = true;
  2072. else
  2073. drvdata->stallctl = false;
  2074. /* SYSSTALL, bit[27] implementation can support stall control? */
  2075. if (BMVAL(etmidr3, 27, 27))
  2076. drvdata->sysstall = true;
  2077. else
  2078. drvdata->sysstall = false;
  2079. /* NUMPROC, bits[30:28] the number of PEs available for tracing */
  2080. drvdata->nr_pe = BMVAL(etmidr3, 28, 30);
  2081. /* NOOVERFLOW, bit[31] is trace overflow prevention supported */
  2082. if (BMVAL(etmidr3, 31, 31))
  2083. drvdata->nooverflow = true;
  2084. else
  2085. drvdata->nooverflow = false;
  2086. /* number of resources trace unit supports */
  2087. etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
  2088. /* NUMACPAIRS, bits[0:3] number of addr comparator pairs for tracing */
  2089. drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
  2090. /* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
  2091. drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
  2092. /*
  2093. * NUMRSPAIR, bits[19:16]
  2094. * The number of resource pairs conveyed by the HW starts at 0, i.e a
  2095. * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
  2096. * As such add 1 to the value of NUMRSPAIR for a better representation.
  2097. */
  2098. drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
  2099. /*
  2100. * NUMSSCC, bits[23:20] the number of single-shot
  2101. * comparator control for tracing
  2102. */
  2103. drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
  2104. /* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
  2105. drvdata->numcidc = BMVAL(etmidr4, 24, 27);
  2106. /* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
  2107. drvdata->numvmidc = BMVAL(etmidr4, 28, 31);
  2108. etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
  2109. /* NUMEXTIN, bits[8:0] number of external inputs implemented */
  2110. drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
  2111. /* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
  2112. drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
  2113. /* ATBTRIG, bit[22] implementation can support ATB triggers? */
  2114. if (BMVAL(etmidr5, 22, 22))
  2115. drvdata->atbtrig = true;
  2116. else
  2117. drvdata->atbtrig = false;
  2118. /*
  2119. * LPOVERRIDE, bit[23] implementation supports
  2120. * low-power state override
  2121. */
  2122. if (BMVAL(etmidr5, 23, 23))
  2123. drvdata->lpoverride = true;
  2124. else
  2125. drvdata->lpoverride = false;
  2126. /* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
  2127. drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
  2128. /* NUMCNTR, bits[30:28] number of counters available for tracing */
  2129. drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
  2130. CS_LOCK(drvdata->base);
  2131. }
/*
 * etm4_init_default_data - set sane power-on defaults in the driver's
 * shadow configuration.  Nothing is written to the hardware here; the
 * cached values are programmed when tracing is enabled.
 */
static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	drvdata->pe_sel = 0x0;
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes for trace */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	/* Resource selector pair 0 is always implemented and reserved */
	drvdata->res_idx = 0x2;
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/* default the first address range comparator to the kernel text */
	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	/* clear all context ID and VMID comparators and their masks */
	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}
	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
	 * start at 0x20.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}
/*
 * CPU hotplug callback: keep the per-CPU trace units coherent with CPU
 * state.  On CPU_STARTING the OS lock is cleared and, if tracing was
 * active, the hardware is re-programmed; on CPU_ONLINE a boot-enabled
 * unit that has not yet been enabled is brought up; on CPU_DYING an
 * active unit is shut down.
 */
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* nothing to do for CPUs without a registered trace unit */
	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm4_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}
		if (etmdrvdata[cpu]->enable)
			etm4_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;
	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm4_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_notifier = {
	.notifier_call = etm4_cpu_callback,
};
/*
 * etm4_probe - AMBA probe for an ETMv4 trace unit.
 *
 * Maps the device registers, discovers the unit's capabilities on the
 * CPU it is affine to, installs the CPU hotplug notifier (first device
 * only) and registers the device with the coresight core.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	/* default to CPU0 when no platform data pins the affinity */
	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* clear the OS lock on the unit's own CPU so it can be programmed */
	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* capability discovery must run on the CPU the ETM is attached to */
	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* a single notifier serves every ETM instance */
	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}

	etm4_init_default_data(drvdata);

	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

	/*
	 * NOTE(review): etmdrvdata[drvdata->cpu] is left pointing at this
	 * (devm-freed) drvdata on the error paths below - confirm whether
	 * it should be cleared here.
	 */
err_arch_supported:
	pm_runtime_put(&adev->dev);
err_coresight_register:
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);
	return ret;
}
/* AMBA peripheral IDs of known ETMv4 implementations */
static struct amba_id etm4_ids[] = {
	{ /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{ /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0},
};
static struct amba_driver etm4x_driver = {
	.drv = {
		.name   = "coresight-etm4x",
		/* tracing units must not be unbound at runtime */
		.suppress_bind_attrs = true,
	},
	.probe		= etm4_probe,
	.id_table	= etm4_ids,
};
builtin_amba_driver(etm4x_driver);