coresight-etm4x.c 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721
  1. /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/kernel.h>
  13. #include <linux/moduleparam.h>
  14. #include <linux/init.h>
  15. #include <linux/types.h>
  16. #include <linux/device.h>
  17. #include <linux/module.h>
  18. #include <linux/io.h>
  19. #include <linux/err.h>
  20. #include <linux/fs.h>
  21. #include <linux/slab.h>
  22. #include <linux/delay.h>
  23. #include <linux/smp.h>
  24. #include <linux/sysfs.h>
  25. #include <linux/stat.h>
  26. #include <linux/clk.h>
  27. #include <linux/cpu.h>
  28. #include <linux/coresight.h>
  29. #include <linux/pm_wakeup.h>
  30. #include <linux/amba/bus.h>
  31. #include <linux/seq_file.h>
  32. #include <linux/uaccess.h>
  33. #include <linux/pm_runtime.h>
  34. #include <asm/sections.h>
  35. #include "coresight-etm4x.h"
/* When non-zero, enable tracing at boot; exposed read-only in sysfs. */
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);

/* The number of ETMv4 currently registered */
static int etm4_count;
/* Per-CPU driver state, one slot per possible CPU (indexed by CPU number). */
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
/*
 * etm4_os_unlock - clear the OS Lock so trace registers become writable.
 * @info: pointer to the device's struct etmv4_drvdata (void * so the
 *        function can also serve as a cross-call callback).
 */
static void etm4_os_unlock(void *info)
{
	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	/* Make sure the unlock takes effect before further register accesses */
	isb();
}
  48. static bool etm4_arch_supported(u8 arch)
  49. {
  50. switch (arch) {
  51. case ETM_ARCH_V4:
  52. break;
  53. default:
  54. return false;
  55. }
  56. return true;
  57. }
/*
 * etm4_trace_id - return the trace ID emitted by this source.
 *
 * When the tracer is idle the cached value is authoritative; otherwise
 * the ID is read back from TRCTRACEIDR with the device powered (runtime
 * PM reference held) and the state protected by the spinlock.
 */
static int etm4_trace_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	/* Not enabled: no need to touch the hardware */
	if (!drvdata->enable)
		return drvdata->trcid;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
	/* Only the ID field is meaningful to callers */
	trace_id &= ETM_TRACEID_MASK;
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return trace_id;
}
/*
 * etm4_enable_hw - program and start the trace unit.
 * @info: the device's struct etmv4_drvdata.
 *
 * Runs on the CPU that owns this ETM (cross-called from etm4_enable())
 * so the register writes happen while the core is powered.  The unit is
 * first stopped, the cached configuration from @info is flushed to the
 * hardware, then tracing is re-enabled.
 */
static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(drvdata->viiectlr,
		       drvdata->base + TRCVIIECTLR);
	writel_relaxed(drvdata->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(drvdata->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	/* nrseqstate states have nrseqstate - 1 transition event registers */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(drvdata->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
	/* Each counter gets a reload value, a control and a current value */
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(drvdata->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(drvdata->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(drvdata->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}

	/* Resource selector pair 0 is always implemented and reserved */
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		writel_relaxed(drvdata->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	/* Single-shot comparator control, status and PE comparator inputs */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(drvdata->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(drvdata->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(drvdata->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	/* Address comparator value and access-type registers (64-bit) */
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(drvdata->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(drvdata->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(drvdata->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(drvdata->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
/*
 * etm4_enable - coresight source op: turn tracing on for this ETM.
 *
 * Takes a runtime PM reference that is only released on failure here or
 * later by etm4_disable().  Returns 0 on success or the error from the
 * cross-call.
 */
static int etm4_enable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw, drvdata, 1);
	if (ret)
		goto err;
	drvdata->enable = true;
	/* sticky_enable stays set even after disable */
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	/* Drop the lock before the PM put, mirroring the success path order */
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(drvdata->dev);
	return ret;
}
/*
 * etm4_disable_hw - stop the trace unit.
 * @info: the device's struct etmv4_drvdata.
 *
 * Runs on the owning CPU (cross-called from etm4_disable()).  Only the
 * enable bit of TRCPRGCTLR is cleared; the rest of the programmed
 * configuration is left intact.
 */
static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
/*
 * etm4_disable - coresight source op: turn tracing off for this ETM.
 *
 * Releases the runtime PM reference taken by etm4_enable().
 */
static void etm4_disable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	pm_runtime_put(drvdata->dev);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}
/* Source operations handed to the coresight core for this tracer */
static const struct coresight_ops_source etm4_source_ops = {
	.trace_id	= etm4_trace_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops	= &etm4_source_ops,
};
  227. static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  228. {
  229. u8 idx = drvdata->addr_idx;
  230. /*
  231. * TRCACATRn.TYPE bit[1:0]: type of comparison
  232. * the trace unit performs
  233. */
  234. if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  235. if (idx % 2 != 0)
  236. return -EINVAL;
  237. /*
  238. * We are performing instruction address comparison. Set the
  239. * relevant bit of ViewInst Include/Exclude Control register
  240. * for corresponding address comparator pair.
  241. */
  242. if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  243. drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  244. return -EINVAL;
  245. if (exclude == true) {
  246. /*
  247. * Set exclude bit and unset the include bit
  248. * corresponding to comparator pair
  249. */
  250. drvdata->viiectlr |= BIT(idx / 2 + 16);
  251. drvdata->viiectlr &= ~BIT(idx / 2);
  252. } else {
  253. /*
  254. * Set include bit and unset exclude bit
  255. * corresponding to comparator pair
  256. */
  257. drvdata->viiectlr |= BIT(idx / 2);
  258. drvdata->viiectlr &= ~BIT(idx / 2 + 16);
  259. }
  260. }
  261. return 0;
  262. }
  263. static ssize_t nr_pe_cmp_show(struct device *dev,
  264. struct device_attribute *attr,
  265. char *buf)
  266. {
  267. unsigned long val;
  268. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  269. val = drvdata->nr_pe_cmp;
  270. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  271. }
  272. static DEVICE_ATTR_RO(nr_pe_cmp);
  273. static ssize_t nr_addr_cmp_show(struct device *dev,
  274. struct device_attribute *attr,
  275. char *buf)
  276. {
  277. unsigned long val;
  278. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  279. val = drvdata->nr_addr_cmp;
  280. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  281. }
  282. static DEVICE_ATTR_RO(nr_addr_cmp);
  283. static ssize_t nr_cntr_show(struct device *dev,
  284. struct device_attribute *attr,
  285. char *buf)
  286. {
  287. unsigned long val;
  288. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  289. val = drvdata->nr_cntr;
  290. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  291. }
  292. static DEVICE_ATTR_RO(nr_cntr);
  293. static ssize_t nr_ext_inp_show(struct device *dev,
  294. struct device_attribute *attr,
  295. char *buf)
  296. {
  297. unsigned long val;
  298. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  299. val = drvdata->nr_ext_inp;
  300. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  301. }
  302. static DEVICE_ATTR_RO(nr_ext_inp);
  303. static ssize_t numcidc_show(struct device *dev,
  304. struct device_attribute *attr,
  305. char *buf)
  306. {
  307. unsigned long val;
  308. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  309. val = drvdata->numcidc;
  310. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  311. }
  312. static DEVICE_ATTR_RO(numcidc);
  313. static ssize_t numvmidc_show(struct device *dev,
  314. struct device_attribute *attr,
  315. char *buf)
  316. {
  317. unsigned long val;
  318. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  319. val = drvdata->numvmidc;
  320. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  321. }
  322. static DEVICE_ATTR_RO(numvmidc);
  323. static ssize_t nrseqstate_show(struct device *dev,
  324. struct device_attribute *attr,
  325. char *buf)
  326. {
  327. unsigned long val;
  328. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  329. val = drvdata->nrseqstate;
  330. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  331. }
  332. static DEVICE_ATTR_RO(nrseqstate);
  333. static ssize_t nr_resource_show(struct device *dev,
  334. struct device_attribute *attr,
  335. char *buf)
  336. {
  337. unsigned long val;
  338. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  339. val = drvdata->nr_resource;
  340. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  341. }
  342. static DEVICE_ATTR_RO(nr_resource);
  343. static ssize_t nr_ss_cmp_show(struct device *dev,
  344. struct device_attribute *attr,
  345. char *buf)
  346. {
  347. unsigned long val;
  348. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  349. val = drvdata->nr_ss_cmp;
  350. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  351. }
  352. static DEVICE_ATTR_RO(nr_ss_cmp);
  353. static ssize_t reset_store(struct device *dev,
  354. struct device_attribute *attr,
  355. const char *buf, size_t size)
  356. {
  357. int i;
  358. unsigned long val;
  359. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  360. if (kstrtoul(buf, 16, &val))
  361. return -EINVAL;
  362. spin_lock(&drvdata->spinlock);
  363. if (val)
  364. drvdata->mode = 0x0;
  365. /* Disable data tracing: do not trace load and store data transfers */
  366. drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
  367. drvdata->cfg &= ~(BIT(1) | BIT(2));
  368. /* Disable data value and data address tracing */
  369. drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
  370. ETM_MODE_DATA_TRACE_VAL);
  371. drvdata->cfg &= ~(BIT(16) | BIT(17));
  372. /* Disable all events tracing */
  373. drvdata->eventctrl0 = 0x0;
  374. drvdata->eventctrl1 = 0x0;
  375. /* Disable timestamp event */
  376. drvdata->ts_ctrl = 0x0;
  377. /* Disable stalling */
  378. drvdata->stall_ctrl = 0x0;
  379. /* Reset trace synchronization period to 2^8 = 256 bytes*/
  380. if (drvdata->syncpr == false)
  381. drvdata->syncfreq = 0x8;
  382. /*
  383. * Enable ViewInst to trace everything with start-stop logic in
  384. * started state. ARM recommends start-stop logic is set before
  385. * each trace run.
  386. */
  387. drvdata->vinst_ctrl |= BIT(0);
  388. if (drvdata->nr_addr_cmp == true) {
  389. drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
  390. /* SSSTATUS, bit[9] */
  391. drvdata->vinst_ctrl |= BIT(9);
  392. }
  393. /* No address range filtering for ViewInst */
  394. drvdata->viiectlr = 0x0;
  395. /* No start-stop filtering for ViewInst */
  396. drvdata->vissctlr = 0x0;
  397. /* Disable seq events */
  398. for (i = 0; i < drvdata->nrseqstate-1; i++)
  399. drvdata->seq_ctrl[i] = 0x0;
  400. drvdata->seq_rst = 0x0;
  401. drvdata->seq_state = 0x0;
  402. /* Disable external input events */
  403. drvdata->ext_inp = 0x0;
  404. drvdata->cntr_idx = 0x0;
  405. for (i = 0; i < drvdata->nr_cntr; i++) {
  406. drvdata->cntrldvr[i] = 0x0;
  407. drvdata->cntr_ctrl[i] = 0x0;
  408. drvdata->cntr_val[i] = 0x0;
  409. }
  410. /* Resource selector pair 0 is always implemented and reserved */
  411. drvdata->res_idx = 0x2;
  412. for (i = 2; i < drvdata->nr_resource * 2; i++)
  413. drvdata->res_ctrl[i] = 0x0;
  414. for (i = 0; i < drvdata->nr_ss_cmp; i++) {
  415. drvdata->ss_ctrl[i] = 0x0;
  416. drvdata->ss_pe_cmp[i] = 0x0;
  417. }
  418. drvdata->addr_idx = 0x0;
  419. for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
  420. drvdata->addr_val[i] = 0x0;
  421. drvdata->addr_acc[i] = 0x0;
  422. drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
  423. }
  424. drvdata->ctxid_idx = 0x0;
  425. for (i = 0; i < drvdata->numcidc; i++) {
  426. drvdata->ctxid_pid[i] = 0x0;
  427. drvdata->ctxid_vpid[i] = 0x0;
  428. }
  429. drvdata->ctxid_mask0 = 0x0;
  430. drvdata->ctxid_mask1 = 0x0;
  431. drvdata->vmid_idx = 0x0;
  432. for (i = 0; i < drvdata->numvmidc; i++)
  433. drvdata->vmid_val[i] = 0x0;
  434. drvdata->vmid_mask0 = 0x0;
  435. drvdata->vmid_mask1 = 0x0;
  436. drvdata->trcid = drvdata->cpu + 1;
  437. spin_unlock(&drvdata->spinlock);
  438. return size;
  439. }
  440. static DEVICE_ATTR_WO(reset);
  441. static ssize_t mode_show(struct device *dev,
  442. struct device_attribute *attr,
  443. char *buf)
  444. {
  445. unsigned long val;
  446. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  447. val = drvdata->mode;
  448. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  449. }
/*
 * mode_store - sysfs: accept a mode word (hex) and translate its flags
 * into the cached TRCCONFIGR / TRCEVENTCTL1R / TRCSTALLCTLR / TRCVICTLR
 * values.  Flags whose feature is not implemented (per the drvdata
 * capability booleans) are silently ignored.  The hardware itself is
 * only reprogrammed on the next enable.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETMv4_MODE_ALL;
	/*
	 * NOTE(review): etm4_set_mode_exclude() can return -EINVAL but the
	 * result is ignored here — confirm whether that is intentional.
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		drvdata->cfg &= ~(BIT(1) | BIT(2));
		if (drvdata->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			drvdata->cfg |= BIT(1);
		if (drvdata->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			drvdata->cfg |= BIT(2);
		if (drvdata->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			drvdata->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		drvdata->cfg |= BIT(3);
	else
		drvdata->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		drvdata->cfg |= BIT(4);
	else
		drvdata->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		drvdata->cfg |= BIT(6);
	else
		drvdata->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier tracing bit */
	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		drvdata->cfg |= BIT(7);
	else
		drvdata->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(drvdata->mode);
	if (drvdata->trccond == true) {
		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		drvdata->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		drvdata->cfg |= BIT(11);
	else
		drvdata->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		drvdata->cfg |= BIT(12);
	else
		drvdata->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(drvdata->mode);
	/* start by clearing QE bits */
	drvdata->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		drvdata->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		drvdata->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		drvdata->eventctrl1 |= BIT(11);
	else
		drvdata->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		drvdata->eventctrl1 |= BIT(12);
	else
		drvdata->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (drvdata->mode & ETM_MODE_ISTALL_EN)
		drvdata->stall_ctrl |= BIT(8);
	else
		drvdata->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (drvdata->mode & ETM_MODE_INSTPRIO)
		drvdata->stall_ctrl |= BIT(10);
	else
		drvdata->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		drvdata->stall_ctrl |= BIT(13);
	else
		drvdata->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
		drvdata->vinst_ctrl |= BIT(9);
	else
		drvdata->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (drvdata->mode & ETM_MODE_TRACE_RESET)
		drvdata->vinst_ctrl |= BIT(10);
	else
		drvdata->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		drvdata->vinst_ctrl |= BIT(11);
	else
		drvdata->vinst_ctrl &= ~BIT(11);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);
  578. static ssize_t pe_show(struct device *dev,
  579. struct device_attribute *attr,
  580. char *buf)
  581. {
  582. unsigned long val;
  583. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  584. val = drvdata->pe_sel;
  585. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  586. }
  587. static ssize_t pe_store(struct device *dev,
  588. struct device_attribute *attr,
  589. const char *buf, size_t size)
  590. {
  591. unsigned long val;
  592. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  593. if (kstrtoul(buf, 16, &val))
  594. return -EINVAL;
  595. spin_lock(&drvdata->spinlock);
  596. if (val > drvdata->nr_pe) {
  597. spin_unlock(&drvdata->spinlock);
  598. return -EINVAL;
  599. }
  600. drvdata->pe_sel = val;
  601. spin_unlock(&drvdata->spinlock);
  602. return size;
  603. }
  604. static DEVICE_ATTR_RW(pe);
  605. static ssize_t event_show(struct device *dev,
  606. struct device_attribute *attr,
  607. char *buf)
  608. {
  609. unsigned long val;
  610. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  611. val = drvdata->eventctrl0;
  612. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  613. }
  614. static ssize_t event_store(struct device *dev,
  615. struct device_attribute *attr,
  616. const char *buf, size_t size)
  617. {
  618. unsigned long val;
  619. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  620. if (kstrtoul(buf, 16, &val))
  621. return -EINVAL;
  622. spin_lock(&drvdata->spinlock);
  623. switch (drvdata->nr_event) {
  624. case 0x0:
  625. /* EVENT0, bits[7:0] */
  626. drvdata->eventctrl0 = val & 0xFF;
  627. break;
  628. case 0x1:
  629. /* EVENT1, bits[15:8] */
  630. drvdata->eventctrl0 = val & 0xFFFF;
  631. break;
  632. case 0x2:
  633. /* EVENT2, bits[23:16] */
  634. drvdata->eventctrl0 = val & 0xFFFFFF;
  635. break;
  636. case 0x3:
  637. /* EVENT3, bits[31:24] */
  638. drvdata->eventctrl0 = val;
  639. break;
  640. default:
  641. break;
  642. }
  643. spin_unlock(&drvdata->spinlock);
  644. return size;
  645. }
  646. static DEVICE_ATTR_RW(event);
  647. static ssize_t event_instren_show(struct device *dev,
  648. struct device_attribute *attr,
  649. char *buf)
  650. {
  651. unsigned long val;
  652. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  653. val = BMVAL(drvdata->eventctrl1, 0, 3);
  654. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  655. }
  656. static ssize_t event_instren_store(struct device *dev,
  657. struct device_attribute *attr,
  658. const char *buf, size_t size)
  659. {
  660. unsigned long val;
  661. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  662. if (kstrtoul(buf, 16, &val))
  663. return -EINVAL;
  664. spin_lock(&drvdata->spinlock);
  665. /* start by clearing all instruction event enable bits */
  666. drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
  667. switch (drvdata->nr_event) {
  668. case 0x0:
  669. /* generate Event element for event 1 */
  670. drvdata->eventctrl1 |= val & BIT(1);
  671. break;
  672. case 0x1:
  673. /* generate Event element for event 1 and 2 */
  674. drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
  675. break;
  676. case 0x2:
  677. /* generate Event element for event 1, 2 and 3 */
  678. drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
  679. break;
  680. case 0x3:
  681. /* generate Event element for all 4 events */
  682. drvdata->eventctrl1 |= val & 0xF;
  683. break;
  684. default:
  685. break;
  686. }
  687. spin_unlock(&drvdata->spinlock);
  688. return size;
  689. }
  690. static DEVICE_ATTR_RW(event_instren);
  691. static ssize_t event_ts_show(struct device *dev,
  692. struct device_attribute *attr,
  693. char *buf)
  694. {
  695. unsigned long val;
  696. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  697. val = drvdata->ts_ctrl;
  698. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  699. }
  700. static ssize_t event_ts_store(struct device *dev,
  701. struct device_attribute *attr,
  702. const char *buf, size_t size)
  703. {
  704. unsigned long val;
  705. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  706. if (kstrtoul(buf, 16, &val))
  707. return -EINVAL;
  708. if (!drvdata->ts_size)
  709. return -EINVAL;
  710. drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
  711. return size;
  712. }
  713. static DEVICE_ATTR_RW(event_ts);
  714. static ssize_t syncfreq_show(struct device *dev,
  715. struct device_attribute *attr,
  716. char *buf)
  717. {
  718. unsigned long val;
  719. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  720. val = drvdata->syncfreq;
  721. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  722. }
  723. static ssize_t syncfreq_store(struct device *dev,
  724. struct device_attribute *attr,
  725. const char *buf, size_t size)
  726. {
  727. unsigned long val;
  728. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  729. if (kstrtoul(buf, 16, &val))
  730. return -EINVAL;
  731. if (drvdata->syncpr == true)
  732. return -EINVAL;
  733. drvdata->syncfreq = val & ETMv4_SYNC_MASK;
  734. return size;
  735. }
  736. static DEVICE_ATTR_RW(syncfreq);
  737. static ssize_t cyc_threshold_show(struct device *dev,
  738. struct device_attribute *attr,
  739. char *buf)
  740. {
  741. unsigned long val;
  742. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  743. val = drvdata->ccctlr;
  744. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  745. }
  746. static ssize_t cyc_threshold_store(struct device *dev,
  747. struct device_attribute *attr,
  748. const char *buf, size_t size)
  749. {
  750. unsigned long val;
  751. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  752. if (kstrtoul(buf, 16, &val))
  753. return -EINVAL;
  754. if (val < drvdata->ccitmin)
  755. return -EINVAL;
  756. drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
  757. return size;
  758. }
  759. static DEVICE_ATTR_RW(cyc_threshold);
  760. static ssize_t bb_ctrl_show(struct device *dev,
  761. struct device_attribute *attr,
  762. char *buf)
  763. {
  764. unsigned long val;
  765. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  766. val = drvdata->bb_ctrl;
  767. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  768. }
  769. static ssize_t bb_ctrl_store(struct device *dev,
  770. struct device_attribute *attr,
  771. const char *buf, size_t size)
  772. {
  773. unsigned long val;
  774. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  775. if (kstrtoul(buf, 16, &val))
  776. return -EINVAL;
  777. if (drvdata->trcbb == false)
  778. return -EINVAL;
  779. if (!drvdata->nr_addr_cmp)
  780. return -EINVAL;
  781. /*
  782. * Bit[7:0] selects which address range comparator is used for
  783. * branch broadcast control.
  784. */
  785. if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
  786. return -EINVAL;
  787. drvdata->bb_ctrl = val;
  788. return size;
  789. }
  790. static DEVICE_ATTR_RW(bb_ctrl);
  791. static ssize_t event_vinst_show(struct device *dev,
  792. struct device_attribute *attr,
  793. char *buf)
  794. {
  795. unsigned long val;
  796. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  797. val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
  798. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  799. }
  800. static ssize_t event_vinst_store(struct device *dev,
  801. struct device_attribute *attr,
  802. const char *buf, size_t size)
  803. {
  804. unsigned long val;
  805. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  806. if (kstrtoul(buf, 16, &val))
  807. return -EINVAL;
  808. spin_lock(&drvdata->spinlock);
  809. val &= ETMv4_EVENT_MASK;
  810. drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
  811. drvdata->vinst_ctrl |= val;
  812. spin_unlock(&drvdata->spinlock);
  813. return size;
  814. }
  815. static DEVICE_ATTR_RW(event_vinst);
  816. static ssize_t s_exlevel_vinst_show(struct device *dev,
  817. struct device_attribute *attr,
  818. char *buf)
  819. {
  820. unsigned long val;
  821. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  822. val = BMVAL(drvdata->vinst_ctrl, 16, 19);
  823. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  824. }
  825. static ssize_t s_exlevel_vinst_store(struct device *dev,
  826. struct device_attribute *attr,
  827. const char *buf, size_t size)
  828. {
  829. unsigned long val;
  830. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  831. if (kstrtoul(buf, 16, &val))
  832. return -EINVAL;
  833. spin_lock(&drvdata->spinlock);
  834. /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
  835. drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
  836. /* enable instruction tracing for corresponding exception level */
  837. val &= drvdata->s_ex_level;
  838. drvdata->vinst_ctrl |= (val << 16);
  839. spin_unlock(&drvdata->spinlock);
  840. return size;
  841. }
  842. static DEVICE_ATTR_RW(s_exlevel_vinst);
  843. static ssize_t ns_exlevel_vinst_show(struct device *dev,
  844. struct device_attribute *attr,
  845. char *buf)
  846. {
  847. unsigned long val;
  848. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  849. /* EXLEVEL_NS, bits[23:20] */
  850. val = BMVAL(drvdata->vinst_ctrl, 20, 23);
  851. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  852. }
  853. static ssize_t ns_exlevel_vinst_store(struct device *dev,
  854. struct device_attribute *attr,
  855. const char *buf, size_t size)
  856. {
  857. unsigned long val;
  858. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  859. if (kstrtoul(buf, 16, &val))
  860. return -EINVAL;
  861. spin_lock(&drvdata->spinlock);
  862. /* clear EXLEVEL_NS bits (bit[23] is never implemented */
  863. drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
  864. /* enable instruction tracing for corresponding exception level */
  865. val &= drvdata->ns_ex_level;
  866. drvdata->vinst_ctrl |= (val << 20);
  867. spin_unlock(&drvdata->spinlock);
  868. return size;
  869. }
  870. static DEVICE_ATTR_RW(ns_exlevel_vinst);
  871. static ssize_t addr_idx_show(struct device *dev,
  872. struct device_attribute *attr,
  873. char *buf)
  874. {
  875. unsigned long val;
  876. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  877. val = drvdata->addr_idx;
  878. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  879. }
  880. static ssize_t addr_idx_store(struct device *dev,
  881. struct device_attribute *attr,
  882. const char *buf, size_t size)
  883. {
  884. unsigned long val;
  885. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  886. if (kstrtoul(buf, 16, &val))
  887. return -EINVAL;
  888. if (val >= drvdata->nr_addr_cmp * 2)
  889. return -EINVAL;
  890. /*
  891. * Use spinlock to ensure index doesn't change while it gets
  892. * dereferenced multiple times within a spinlock block elsewhere.
  893. */
  894. spin_lock(&drvdata->spinlock);
  895. drvdata->addr_idx = val;
  896. spin_unlock(&drvdata->spinlock);
  897. return size;
  898. }
  899. static DEVICE_ATTR_RW(addr_idx);
  900. static ssize_t addr_instdatatype_show(struct device *dev,
  901. struct device_attribute *attr,
  902. char *buf)
  903. {
  904. ssize_t len;
  905. u8 val, idx;
  906. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  907. spin_lock(&drvdata->spinlock);
  908. idx = drvdata->addr_idx;
  909. val = BMVAL(drvdata->addr_acc[idx], 0, 1);
  910. len = scnprintf(buf, PAGE_SIZE, "%s\n",
  911. val == ETM_INSTR_ADDR ? "instr" :
  912. (val == ETM_DATA_LOAD_ADDR ? "data_load" :
  913. (val == ETM_DATA_STORE_ADDR ? "data_store" :
  914. "data_load_store")));
  915. spin_unlock(&drvdata->spinlock);
  916. return len;
  917. }
  918. static ssize_t addr_instdatatype_store(struct device *dev,
  919. struct device_attribute *attr,
  920. const char *buf, size_t size)
  921. {
  922. u8 idx;
  923. char str[20] = "";
  924. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  925. if (strlen(buf) >= 20)
  926. return -EINVAL;
  927. if (sscanf(buf, "%s", str) != 1)
  928. return -EINVAL;
  929. spin_lock(&drvdata->spinlock);
  930. idx = drvdata->addr_idx;
  931. if (!strcmp(str, "instr"))
  932. /* TYPE, bits[1:0] */
  933. drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
  934. spin_unlock(&drvdata->spinlock);
  935. return size;
  936. }
  937. static DEVICE_ATTR_RW(addr_instdatatype);
  938. static ssize_t addr_single_show(struct device *dev,
  939. struct device_attribute *attr,
  940. char *buf)
  941. {
  942. u8 idx;
  943. unsigned long val;
  944. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  945. idx = drvdata->addr_idx;
  946. spin_lock(&drvdata->spinlock);
  947. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  948. drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  949. spin_unlock(&drvdata->spinlock);
  950. return -EPERM;
  951. }
  952. val = (unsigned long)drvdata->addr_val[idx];
  953. spin_unlock(&drvdata->spinlock);
  954. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  955. }
  956. static ssize_t addr_single_store(struct device *dev,
  957. struct device_attribute *attr,
  958. const char *buf, size_t size)
  959. {
  960. u8 idx;
  961. unsigned long val;
  962. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  963. if (kstrtoul(buf, 16, &val))
  964. return -EINVAL;
  965. spin_lock(&drvdata->spinlock);
  966. idx = drvdata->addr_idx;
  967. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  968. drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  969. spin_unlock(&drvdata->spinlock);
  970. return -EPERM;
  971. }
  972. drvdata->addr_val[idx] = (u64)val;
  973. drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
  974. spin_unlock(&drvdata->spinlock);
  975. return size;
  976. }
  977. static DEVICE_ATTR_RW(addr_single);
  978. static ssize_t addr_range_show(struct device *dev,
  979. struct device_attribute *attr,
  980. char *buf)
  981. {
  982. u8 idx;
  983. unsigned long val1, val2;
  984. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  985. spin_lock(&drvdata->spinlock);
  986. idx = drvdata->addr_idx;
  987. if (idx % 2 != 0) {
  988. spin_unlock(&drvdata->spinlock);
  989. return -EPERM;
  990. }
  991. if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
  992. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
  993. (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
  994. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
  995. spin_unlock(&drvdata->spinlock);
  996. return -EPERM;
  997. }
  998. val1 = (unsigned long)drvdata->addr_val[idx];
  999. val2 = (unsigned long)drvdata->addr_val[idx + 1];
  1000. spin_unlock(&drvdata->spinlock);
  1001. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1002. }
  1003. static ssize_t addr_range_store(struct device *dev,
  1004. struct device_attribute *attr,
  1005. const char *buf, size_t size)
  1006. {
  1007. u8 idx;
  1008. unsigned long val1, val2;
  1009. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1010. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1011. return -EINVAL;
  1012. /* lower address comparator cannot have a higher address value */
  1013. if (val1 > val2)
  1014. return -EINVAL;
  1015. spin_lock(&drvdata->spinlock);
  1016. idx = drvdata->addr_idx;
  1017. if (idx % 2 != 0) {
  1018. spin_unlock(&drvdata->spinlock);
  1019. return -EPERM;
  1020. }
  1021. if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
  1022. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
  1023. (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
  1024. drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
  1025. spin_unlock(&drvdata->spinlock);
  1026. return -EPERM;
  1027. }
  1028. drvdata->addr_val[idx] = (u64)val1;
  1029. drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
  1030. drvdata->addr_val[idx + 1] = (u64)val2;
  1031. drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
  1032. /*
  1033. * Program include or exclude control bits for vinst or vdata
  1034. * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
  1035. */
  1036. if (drvdata->mode & ETM_MODE_EXCLUDE)
  1037. etm4_set_mode_exclude(drvdata, true);
  1038. else
  1039. etm4_set_mode_exclude(drvdata, false);
  1040. spin_unlock(&drvdata->spinlock);
  1041. return size;
  1042. }
  1043. static DEVICE_ATTR_RW(addr_range);
  1044. static ssize_t addr_start_show(struct device *dev,
  1045. struct device_attribute *attr,
  1046. char *buf)
  1047. {
  1048. u8 idx;
  1049. unsigned long val;
  1050. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1051. spin_lock(&drvdata->spinlock);
  1052. idx = drvdata->addr_idx;
  1053. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1054. drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  1055. spin_unlock(&drvdata->spinlock);
  1056. return -EPERM;
  1057. }
  1058. val = (unsigned long)drvdata->addr_val[idx];
  1059. spin_unlock(&drvdata->spinlock);
  1060. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1061. }
  1062. static ssize_t addr_start_store(struct device *dev,
  1063. struct device_attribute *attr,
  1064. const char *buf, size_t size)
  1065. {
  1066. u8 idx;
  1067. unsigned long val;
  1068. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1069. if (kstrtoul(buf, 16, &val))
  1070. return -EINVAL;
  1071. spin_lock(&drvdata->spinlock);
  1072. idx = drvdata->addr_idx;
  1073. if (!drvdata->nr_addr_cmp) {
  1074. spin_unlock(&drvdata->spinlock);
  1075. return -EINVAL;
  1076. }
  1077. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1078. drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  1079. spin_unlock(&drvdata->spinlock);
  1080. return -EPERM;
  1081. }
  1082. drvdata->addr_val[idx] = (u64)val;
  1083. drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
  1084. drvdata->vissctlr |= BIT(idx);
  1085. /* SSSTATUS, bit[9] - turn on start/stop logic */
  1086. drvdata->vinst_ctrl |= BIT(9);
  1087. spin_unlock(&drvdata->spinlock);
  1088. return size;
  1089. }
  1090. static DEVICE_ATTR_RW(addr_start);
  1091. static ssize_t addr_stop_show(struct device *dev,
  1092. struct device_attribute *attr,
  1093. char *buf)
  1094. {
  1095. u8 idx;
  1096. unsigned long val;
  1097. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1098. spin_lock(&drvdata->spinlock);
  1099. idx = drvdata->addr_idx;
  1100. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1101. drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  1102. spin_unlock(&drvdata->spinlock);
  1103. return -EPERM;
  1104. }
  1105. val = (unsigned long)drvdata->addr_val[idx];
  1106. spin_unlock(&drvdata->spinlock);
  1107. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1108. }
  1109. static ssize_t addr_stop_store(struct device *dev,
  1110. struct device_attribute *attr,
  1111. const char *buf, size_t size)
  1112. {
  1113. u8 idx;
  1114. unsigned long val;
  1115. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1116. if (kstrtoul(buf, 16, &val))
  1117. return -EINVAL;
  1118. spin_lock(&drvdata->spinlock);
  1119. idx = drvdata->addr_idx;
  1120. if (!drvdata->nr_addr_cmp) {
  1121. spin_unlock(&drvdata->spinlock);
  1122. return -EINVAL;
  1123. }
  1124. if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  1125. drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  1126. spin_unlock(&drvdata->spinlock);
  1127. return -EPERM;
  1128. }
  1129. drvdata->addr_val[idx] = (u64)val;
  1130. drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
  1131. drvdata->vissctlr |= BIT(idx + 16);
  1132. /* SSSTATUS, bit[9] - turn on start/stop logic */
  1133. drvdata->vinst_ctrl |= BIT(9);
  1134. spin_unlock(&drvdata->spinlock);
  1135. return size;
  1136. }
  1137. static DEVICE_ATTR_RW(addr_stop);
  1138. static ssize_t addr_ctxtype_show(struct device *dev,
  1139. struct device_attribute *attr,
  1140. char *buf)
  1141. {
  1142. ssize_t len;
  1143. u8 idx, val;
  1144. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1145. spin_lock(&drvdata->spinlock);
  1146. idx = drvdata->addr_idx;
  1147. /* CONTEXTTYPE, bits[3:2] */
  1148. val = BMVAL(drvdata->addr_acc[idx], 2, 3);
  1149. len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
  1150. (val == ETM_CTX_CTXID ? "ctxid" :
  1151. (val == ETM_CTX_VMID ? "vmid" : "all")));
  1152. spin_unlock(&drvdata->spinlock);
  1153. return len;
  1154. }
  1155. static ssize_t addr_ctxtype_store(struct device *dev,
  1156. struct device_attribute *attr,
  1157. const char *buf, size_t size)
  1158. {
  1159. u8 idx;
  1160. char str[10] = "";
  1161. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1162. if (strlen(buf) >= 10)
  1163. return -EINVAL;
  1164. if (sscanf(buf, "%s", str) != 1)
  1165. return -EINVAL;
  1166. spin_lock(&drvdata->spinlock);
  1167. idx = drvdata->addr_idx;
  1168. if (!strcmp(str, "none"))
  1169. /* start by clearing context type bits */
  1170. drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
  1171. else if (!strcmp(str, "ctxid")) {
  1172. /* 0b01 The trace unit performs a Context ID */
  1173. if (drvdata->numcidc) {
  1174. drvdata->addr_acc[idx] |= BIT(2);
  1175. drvdata->addr_acc[idx] &= ~BIT(3);
  1176. }
  1177. } else if (!strcmp(str, "vmid")) {
  1178. /* 0b10 The trace unit performs a VMID */
  1179. if (drvdata->numvmidc) {
  1180. drvdata->addr_acc[idx] &= ~BIT(2);
  1181. drvdata->addr_acc[idx] |= BIT(3);
  1182. }
  1183. } else if (!strcmp(str, "all")) {
  1184. /*
  1185. * 0b11 The trace unit performs a Context ID
  1186. * comparison and a VMID
  1187. */
  1188. if (drvdata->numcidc)
  1189. drvdata->addr_acc[idx] |= BIT(2);
  1190. if (drvdata->numvmidc)
  1191. drvdata->addr_acc[idx] |= BIT(3);
  1192. }
  1193. spin_unlock(&drvdata->spinlock);
  1194. return size;
  1195. }
  1196. static DEVICE_ATTR_RW(addr_ctxtype);
  1197. static ssize_t addr_context_show(struct device *dev,
  1198. struct device_attribute *attr,
  1199. char *buf)
  1200. {
  1201. u8 idx;
  1202. unsigned long val;
  1203. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1204. spin_lock(&drvdata->spinlock);
  1205. idx = drvdata->addr_idx;
  1206. /* context ID comparator bits[6:4] */
  1207. val = BMVAL(drvdata->addr_acc[idx], 4, 6);
  1208. spin_unlock(&drvdata->spinlock);
  1209. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1210. }
  1211. static ssize_t addr_context_store(struct device *dev,
  1212. struct device_attribute *attr,
  1213. const char *buf, size_t size)
  1214. {
  1215. u8 idx;
  1216. unsigned long val;
  1217. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1218. if (kstrtoul(buf, 16, &val))
  1219. return -EINVAL;
  1220. if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
  1221. return -EINVAL;
  1222. if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
  1223. drvdata->numcidc : drvdata->numvmidc))
  1224. return -EINVAL;
  1225. spin_lock(&drvdata->spinlock);
  1226. idx = drvdata->addr_idx;
  1227. /* clear context ID comparator bits[6:4] */
  1228. drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
  1229. drvdata->addr_acc[idx] |= (val << 4);
  1230. spin_unlock(&drvdata->spinlock);
  1231. return size;
  1232. }
  1233. static DEVICE_ATTR_RW(addr_context);
  1234. static ssize_t seq_idx_show(struct device *dev,
  1235. struct device_attribute *attr,
  1236. char *buf)
  1237. {
  1238. unsigned long val;
  1239. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1240. val = drvdata->seq_idx;
  1241. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1242. }
  1243. static ssize_t seq_idx_store(struct device *dev,
  1244. struct device_attribute *attr,
  1245. const char *buf, size_t size)
  1246. {
  1247. unsigned long val;
  1248. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1249. if (kstrtoul(buf, 16, &val))
  1250. return -EINVAL;
  1251. if (val >= drvdata->nrseqstate - 1)
  1252. return -EINVAL;
  1253. /*
  1254. * Use spinlock to ensure index doesn't change while it gets
  1255. * dereferenced multiple times within a spinlock block elsewhere.
  1256. */
  1257. spin_lock(&drvdata->spinlock);
  1258. drvdata->seq_idx = val;
  1259. spin_unlock(&drvdata->spinlock);
  1260. return size;
  1261. }
  1262. static DEVICE_ATTR_RW(seq_idx);
  1263. static ssize_t seq_state_show(struct device *dev,
  1264. struct device_attribute *attr,
  1265. char *buf)
  1266. {
  1267. unsigned long val;
  1268. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1269. val = drvdata->seq_state;
  1270. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1271. }
  1272. static ssize_t seq_state_store(struct device *dev,
  1273. struct device_attribute *attr,
  1274. const char *buf, size_t size)
  1275. {
  1276. unsigned long val;
  1277. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1278. if (kstrtoul(buf, 16, &val))
  1279. return -EINVAL;
  1280. if (val >= drvdata->nrseqstate)
  1281. return -EINVAL;
  1282. drvdata->seq_state = val;
  1283. return size;
  1284. }
  1285. static DEVICE_ATTR_RW(seq_state);
  1286. static ssize_t seq_event_show(struct device *dev,
  1287. struct device_attribute *attr,
  1288. char *buf)
  1289. {
  1290. u8 idx;
  1291. unsigned long val;
  1292. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1293. spin_lock(&drvdata->spinlock);
  1294. idx = drvdata->seq_idx;
  1295. val = drvdata->seq_ctrl[idx];
  1296. spin_unlock(&drvdata->spinlock);
  1297. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1298. }
  1299. static ssize_t seq_event_store(struct device *dev,
  1300. struct device_attribute *attr,
  1301. const char *buf, size_t size)
  1302. {
  1303. u8 idx;
  1304. unsigned long val;
  1305. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1306. if (kstrtoul(buf, 16, &val))
  1307. return -EINVAL;
  1308. spin_lock(&drvdata->spinlock);
  1309. idx = drvdata->seq_idx;
  1310. /* RST, bits[7:0] */
  1311. drvdata->seq_ctrl[idx] = val & 0xFF;
  1312. spin_unlock(&drvdata->spinlock);
  1313. return size;
  1314. }
  1315. static DEVICE_ATTR_RW(seq_event);
  1316. static ssize_t seq_reset_event_show(struct device *dev,
  1317. struct device_attribute *attr,
  1318. char *buf)
  1319. {
  1320. unsigned long val;
  1321. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1322. val = drvdata->seq_rst;
  1323. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1324. }
  1325. static ssize_t seq_reset_event_store(struct device *dev,
  1326. struct device_attribute *attr,
  1327. const char *buf, size_t size)
  1328. {
  1329. unsigned long val;
  1330. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1331. if (kstrtoul(buf, 16, &val))
  1332. return -EINVAL;
  1333. if (!(drvdata->nrseqstate))
  1334. return -EINVAL;
  1335. drvdata->seq_rst = val & ETMv4_EVENT_MASK;
  1336. return size;
  1337. }
  1338. static DEVICE_ATTR_RW(seq_reset_event);
  1339. static ssize_t cntr_idx_show(struct device *dev,
  1340. struct device_attribute *attr,
  1341. char *buf)
  1342. {
  1343. unsigned long val;
  1344. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1345. val = drvdata->cntr_idx;
  1346. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1347. }
  1348. static ssize_t cntr_idx_store(struct device *dev,
  1349. struct device_attribute *attr,
  1350. const char *buf, size_t size)
  1351. {
  1352. unsigned long val;
  1353. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1354. if (kstrtoul(buf, 16, &val))
  1355. return -EINVAL;
  1356. if (val >= drvdata->nr_cntr)
  1357. return -EINVAL;
  1358. /*
  1359. * Use spinlock to ensure index doesn't change while it gets
  1360. * dereferenced multiple times within a spinlock block elsewhere.
  1361. */
  1362. spin_lock(&drvdata->spinlock);
  1363. drvdata->cntr_idx = val;
  1364. spin_unlock(&drvdata->spinlock);
  1365. return size;
  1366. }
  1367. static DEVICE_ATTR_RW(cntr_idx);
  1368. static ssize_t cntrldvr_show(struct device *dev,
  1369. struct device_attribute *attr,
  1370. char *buf)
  1371. {
  1372. u8 idx;
  1373. unsigned long val;
  1374. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1375. spin_lock(&drvdata->spinlock);
  1376. idx = drvdata->cntr_idx;
  1377. val = drvdata->cntrldvr[idx];
  1378. spin_unlock(&drvdata->spinlock);
  1379. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1380. }
  1381. static ssize_t cntrldvr_store(struct device *dev,
  1382. struct device_attribute *attr,
  1383. const char *buf, size_t size)
  1384. {
  1385. u8 idx;
  1386. unsigned long val;
  1387. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1388. if (kstrtoul(buf, 16, &val))
  1389. return -EINVAL;
  1390. if (val > ETM_CNTR_MAX_VAL)
  1391. return -EINVAL;
  1392. spin_lock(&drvdata->spinlock);
  1393. idx = drvdata->cntr_idx;
  1394. drvdata->cntrldvr[idx] = val;
  1395. spin_unlock(&drvdata->spinlock);
  1396. return size;
  1397. }
  1398. static DEVICE_ATTR_RW(cntrldvr);
  1399. static ssize_t cntr_val_show(struct device *dev,
  1400. struct device_attribute *attr,
  1401. char *buf)
  1402. {
  1403. u8 idx;
  1404. unsigned long val;
  1405. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1406. spin_lock(&drvdata->spinlock);
  1407. idx = drvdata->cntr_idx;
  1408. val = drvdata->cntr_val[idx];
  1409. spin_unlock(&drvdata->spinlock);
  1410. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1411. }
  1412. static ssize_t cntr_val_store(struct device *dev,
  1413. struct device_attribute *attr,
  1414. const char *buf, size_t size)
  1415. {
  1416. u8 idx;
  1417. unsigned long val;
  1418. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1419. if (kstrtoul(buf, 16, &val))
  1420. return -EINVAL;
  1421. if (val > ETM_CNTR_MAX_VAL)
  1422. return -EINVAL;
  1423. spin_lock(&drvdata->spinlock);
  1424. idx = drvdata->cntr_idx;
  1425. drvdata->cntr_val[idx] = val;
  1426. spin_unlock(&drvdata->spinlock);
  1427. return size;
  1428. }
  1429. static DEVICE_ATTR_RW(cntr_val);
  1430. static ssize_t cntr_ctrl_show(struct device *dev,
  1431. struct device_attribute *attr,
  1432. char *buf)
  1433. {
  1434. u8 idx;
  1435. unsigned long val;
  1436. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1437. spin_lock(&drvdata->spinlock);
  1438. idx = drvdata->cntr_idx;
  1439. val = drvdata->cntr_ctrl[idx];
  1440. spin_unlock(&drvdata->spinlock);
  1441. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1442. }
  1443. static ssize_t cntr_ctrl_store(struct device *dev,
  1444. struct device_attribute *attr,
  1445. const char *buf, size_t size)
  1446. {
  1447. u8 idx;
  1448. unsigned long val;
  1449. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1450. if (kstrtoul(buf, 16, &val))
  1451. return -EINVAL;
  1452. spin_lock(&drvdata->spinlock);
  1453. idx = drvdata->cntr_idx;
  1454. drvdata->cntr_ctrl[idx] = val;
  1455. spin_unlock(&drvdata->spinlock);
  1456. return size;
  1457. }
  1458. static DEVICE_ATTR_RW(cntr_ctrl);
  1459. static ssize_t res_idx_show(struct device *dev,
  1460. struct device_attribute *attr,
  1461. char *buf)
  1462. {
  1463. unsigned long val;
  1464. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1465. val = drvdata->res_idx;
  1466. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1467. }
  1468. static ssize_t res_idx_store(struct device *dev,
  1469. struct device_attribute *attr,
  1470. const char *buf, size_t size)
  1471. {
  1472. unsigned long val;
  1473. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1474. if (kstrtoul(buf, 16, &val))
  1475. return -EINVAL;
  1476. /* Resource selector pair 0 is always implemented and reserved */
  1477. if (val < 2 || val >= drvdata->nr_resource * 2)
  1478. return -EINVAL;
  1479. /*
  1480. * Use spinlock to ensure index doesn't change while it gets
  1481. * dereferenced multiple times within a spinlock block elsewhere.
  1482. */
  1483. spin_lock(&drvdata->spinlock);
  1484. drvdata->res_idx = val;
  1485. spin_unlock(&drvdata->spinlock);
  1486. return size;
  1487. }
  1488. static DEVICE_ATTR_RW(res_idx);
  1489. static ssize_t res_ctrl_show(struct device *dev,
  1490. struct device_attribute *attr,
  1491. char *buf)
  1492. {
  1493. u8 idx;
  1494. unsigned long val;
  1495. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1496. spin_lock(&drvdata->spinlock);
  1497. idx = drvdata->res_idx;
  1498. val = drvdata->res_ctrl[idx];
  1499. spin_unlock(&drvdata->spinlock);
  1500. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1501. }
  1502. static ssize_t res_ctrl_store(struct device *dev,
  1503. struct device_attribute *attr,
  1504. const char *buf, size_t size)
  1505. {
  1506. u8 idx;
  1507. unsigned long val;
  1508. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1509. if (kstrtoul(buf, 16, &val))
  1510. return -EINVAL;
  1511. spin_lock(&drvdata->spinlock);
  1512. idx = drvdata->res_idx;
  1513. /* For odd idx pair inversal bit is RES0 */
  1514. if (idx % 2 != 0)
  1515. /* PAIRINV, bit[21] */
  1516. val &= ~BIT(21);
  1517. drvdata->res_ctrl[idx] = val;
  1518. spin_unlock(&drvdata->spinlock);
  1519. return size;
  1520. }
  1521. static DEVICE_ATTR_RW(res_ctrl);
  1522. static ssize_t ctxid_idx_show(struct device *dev,
  1523. struct device_attribute *attr,
  1524. char *buf)
  1525. {
  1526. unsigned long val;
  1527. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1528. val = drvdata->ctxid_idx;
  1529. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1530. }
  1531. static ssize_t ctxid_idx_store(struct device *dev,
  1532. struct device_attribute *attr,
  1533. const char *buf, size_t size)
  1534. {
  1535. unsigned long val;
  1536. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1537. if (kstrtoul(buf, 16, &val))
  1538. return -EINVAL;
  1539. if (val >= drvdata->numcidc)
  1540. return -EINVAL;
  1541. /*
  1542. * Use spinlock to ensure index doesn't change while it gets
  1543. * dereferenced multiple times within a spinlock block elsewhere.
  1544. */
  1545. spin_lock(&drvdata->spinlock);
  1546. drvdata->ctxid_idx = val;
  1547. spin_unlock(&drvdata->spinlock);
  1548. return size;
  1549. }
  1550. static DEVICE_ATTR_RW(ctxid_idx);
  1551. static ssize_t ctxid_pid_show(struct device *dev,
  1552. struct device_attribute *attr,
  1553. char *buf)
  1554. {
  1555. u8 idx;
  1556. unsigned long val;
  1557. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1558. spin_lock(&drvdata->spinlock);
  1559. idx = drvdata->ctxid_idx;
  1560. val = (unsigned long)drvdata->ctxid_vpid[idx];
  1561. spin_unlock(&drvdata->spinlock);
  1562. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1563. }
/*
 * ctxid_pid_store - program the context ID comparator selected via
 * ctxid_idx.
 *
 * The user-supplied hex value is treated as a VPID and run through
 * coresight_vpid_to_pid() (presumably a namespace-local to global PID
 * translation — confirm against its definition); the translated value
 * feeds the comparator while the original VPID is kept so that
 * ctxid_pid_show() echoes back what the user wrote.
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	/* Both shadow arrays must be updated atomically w.r.t. readers. */
	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	drvdata->ctxid_pid[idx] = (u64)pid;
	drvdata->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
  1589. static ssize_t ctxid_masks_show(struct device *dev,
  1590. struct device_attribute *attr,
  1591. char *buf)
  1592. {
  1593. unsigned long val1, val2;
  1594. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1595. spin_lock(&drvdata->spinlock);
  1596. val1 = drvdata->ctxid_mask0;
  1597. val2 = drvdata->ctxid_mask1;
  1598. spin_unlock(&drvdata->spinlock);
  1599. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1600. }
  1601. static ssize_t ctxid_masks_store(struct device *dev,
  1602. struct device_attribute *attr,
  1603. const char *buf, size_t size)
  1604. {
  1605. u8 i, j, maskbyte;
  1606. unsigned long val1, val2, mask;
  1607. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1608. /*
  1609. * only implemented when ctxid tracing is enabled, i.e. at least one
  1610. * ctxid comparator is implemented and ctxid is greater than 0 bits
  1611. * in length
  1612. */
  1613. if (!drvdata->ctxid_size || !drvdata->numcidc)
  1614. return -EINVAL;
  1615. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1616. return -EINVAL;
  1617. spin_lock(&drvdata->spinlock);
  1618. /*
  1619. * each byte[0..3] controls mask value applied to ctxid
  1620. * comparator[0..3]
  1621. */
  1622. switch (drvdata->numcidc) {
  1623. case 0x1:
  1624. /* COMP0, bits[7:0] */
  1625. drvdata->ctxid_mask0 = val1 & 0xFF;
  1626. break;
  1627. case 0x2:
  1628. /* COMP1, bits[15:8] */
  1629. drvdata->ctxid_mask0 = val1 & 0xFFFF;
  1630. break;
  1631. case 0x3:
  1632. /* COMP2, bits[23:16] */
  1633. drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
  1634. break;
  1635. case 0x4:
  1636. /* COMP3, bits[31:24] */
  1637. drvdata->ctxid_mask0 = val1;
  1638. break;
  1639. case 0x5:
  1640. /* COMP4, bits[7:0] */
  1641. drvdata->ctxid_mask0 = val1;
  1642. drvdata->ctxid_mask1 = val2 & 0xFF;
  1643. break;
  1644. case 0x6:
  1645. /* COMP5, bits[15:8] */
  1646. drvdata->ctxid_mask0 = val1;
  1647. drvdata->ctxid_mask1 = val2 & 0xFFFF;
  1648. break;
  1649. case 0x7:
  1650. /* COMP6, bits[23:16] */
  1651. drvdata->ctxid_mask0 = val1;
  1652. drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
  1653. break;
  1654. case 0x8:
  1655. /* COMP7, bits[31:24] */
  1656. drvdata->ctxid_mask0 = val1;
  1657. drvdata->ctxid_mask1 = val2;
  1658. break;
  1659. default:
  1660. break;
  1661. }
  1662. /*
  1663. * If software sets a mask bit to 1, it must program relevant byte
  1664. * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
  1665. * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
  1666. * of ctxid comparator0 value (corresponding to byte 0) register.
  1667. */
  1668. mask = drvdata->ctxid_mask0;
  1669. for (i = 0; i < drvdata->numcidc; i++) {
  1670. /* mask value of corresponding ctxid comparator */
  1671. maskbyte = mask & ETMv4_EVENT_MASK;
  1672. /*
  1673. * each bit corresponds to a byte of respective ctxid comparator
  1674. * value register
  1675. */
  1676. for (j = 0; j < 8; j++) {
  1677. if (maskbyte & 1)
  1678. drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
  1679. maskbyte >>= 1;
  1680. }
  1681. /* Select the next ctxid comparator mask value */
  1682. if (i == 3)
  1683. /* ctxid comparators[4-7] */
  1684. mask = drvdata->ctxid_mask1;
  1685. else
  1686. mask >>= 0x8;
  1687. }
  1688. spin_unlock(&drvdata->spinlock);
  1689. return size;
  1690. }
  1691. static DEVICE_ATTR_RW(ctxid_masks);
  1692. static ssize_t vmid_idx_show(struct device *dev,
  1693. struct device_attribute *attr,
  1694. char *buf)
  1695. {
  1696. unsigned long val;
  1697. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1698. val = drvdata->vmid_idx;
  1699. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1700. }
  1701. static ssize_t vmid_idx_store(struct device *dev,
  1702. struct device_attribute *attr,
  1703. const char *buf, size_t size)
  1704. {
  1705. unsigned long val;
  1706. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1707. if (kstrtoul(buf, 16, &val))
  1708. return -EINVAL;
  1709. if (val >= drvdata->numvmidc)
  1710. return -EINVAL;
  1711. /*
  1712. * Use spinlock to ensure index doesn't change while it gets
  1713. * dereferenced multiple times within a spinlock block elsewhere.
  1714. */
  1715. spin_lock(&drvdata->spinlock);
  1716. drvdata->vmid_idx = val;
  1717. spin_unlock(&drvdata->spinlock);
  1718. return size;
  1719. }
  1720. static DEVICE_ATTR_RW(vmid_idx);
  1721. static ssize_t vmid_val_show(struct device *dev,
  1722. struct device_attribute *attr,
  1723. char *buf)
  1724. {
  1725. unsigned long val;
  1726. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1727. val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
  1728. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1729. }
  1730. static ssize_t vmid_val_store(struct device *dev,
  1731. struct device_attribute *attr,
  1732. const char *buf, size_t size)
  1733. {
  1734. unsigned long val;
  1735. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1736. /*
  1737. * only implemented when vmid tracing is enabled, i.e. at least one
  1738. * vmid comparator is implemented and at least 8 bit vmid size
  1739. */
  1740. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1741. return -EINVAL;
  1742. if (kstrtoul(buf, 16, &val))
  1743. return -EINVAL;
  1744. spin_lock(&drvdata->spinlock);
  1745. drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
  1746. spin_unlock(&drvdata->spinlock);
  1747. return size;
  1748. }
  1749. static DEVICE_ATTR_RW(vmid_val);
  1750. static ssize_t vmid_masks_show(struct device *dev,
  1751. struct device_attribute *attr, char *buf)
  1752. {
  1753. unsigned long val1, val2;
  1754. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1755. spin_lock(&drvdata->spinlock);
  1756. val1 = drvdata->vmid_mask0;
  1757. val2 = drvdata->vmid_mask1;
  1758. spin_unlock(&drvdata->spinlock);
  1759. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1760. }
  1761. static ssize_t vmid_masks_store(struct device *dev,
  1762. struct device_attribute *attr,
  1763. const char *buf, size_t size)
  1764. {
  1765. u8 i, j, maskbyte;
  1766. unsigned long val1, val2, mask;
  1767. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1768. /*
  1769. * only implemented when vmid tracing is enabled, i.e. at least one
  1770. * vmid comparator is implemented and at least 8 bit vmid size
  1771. */
  1772. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1773. return -EINVAL;
  1774. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1775. return -EINVAL;
  1776. spin_lock(&drvdata->spinlock);
  1777. /*
  1778. * each byte[0..3] controls mask value applied to vmid
  1779. * comparator[0..3]
  1780. */
  1781. switch (drvdata->numvmidc) {
  1782. case 0x1:
  1783. /* COMP0, bits[7:0] */
  1784. drvdata->vmid_mask0 = val1 & 0xFF;
  1785. break;
  1786. case 0x2:
  1787. /* COMP1, bits[15:8] */
  1788. drvdata->vmid_mask0 = val1 & 0xFFFF;
  1789. break;
  1790. case 0x3:
  1791. /* COMP2, bits[23:16] */
  1792. drvdata->vmid_mask0 = val1 & 0xFFFFFF;
  1793. break;
  1794. case 0x4:
  1795. /* COMP3, bits[31:24] */
  1796. drvdata->vmid_mask0 = val1;
  1797. break;
  1798. case 0x5:
  1799. /* COMP4, bits[7:0] */
  1800. drvdata->vmid_mask0 = val1;
  1801. drvdata->vmid_mask1 = val2 & 0xFF;
  1802. break;
  1803. case 0x6:
  1804. /* COMP5, bits[15:8] */
  1805. drvdata->vmid_mask0 = val1;
  1806. drvdata->vmid_mask1 = val2 & 0xFFFF;
  1807. break;
  1808. case 0x7:
  1809. /* COMP6, bits[23:16] */
  1810. drvdata->vmid_mask0 = val1;
  1811. drvdata->vmid_mask1 = val2 & 0xFFFFFF;
  1812. break;
  1813. case 0x8:
  1814. /* COMP7, bits[31:24] */
  1815. drvdata->vmid_mask0 = val1;
  1816. drvdata->vmid_mask1 = val2;
  1817. break;
  1818. default:
  1819. break;
  1820. }
  1821. /*
  1822. * If software sets a mask bit to 1, it must program relevant byte
  1823. * of vmid comparator value 0x0, otherwise behavior is unpredictable.
  1824. * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
  1825. * of vmid comparator0 value (corresponding to byte 0) register.
  1826. */
  1827. mask = drvdata->vmid_mask0;
  1828. for (i = 0; i < drvdata->numvmidc; i++) {
  1829. /* mask value of corresponding vmid comparator */
  1830. maskbyte = mask & ETMv4_EVENT_MASK;
  1831. /*
  1832. * each bit corresponds to a byte of respective vmid comparator
  1833. * value register
  1834. */
  1835. for (j = 0; j < 8; j++) {
  1836. if (maskbyte & 1)
  1837. drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
  1838. maskbyte >>= 1;
  1839. }
  1840. /* Select the next vmid comparator mask value */
  1841. if (i == 3)
  1842. /* vmid comparators[4-7] */
  1843. mask = drvdata->vmid_mask1;
  1844. else
  1845. mask >>= 0x8;
  1846. }
  1847. spin_unlock(&drvdata->spinlock);
  1848. return size;
  1849. }
  1850. static DEVICE_ATTR_RW(vmid_masks);
  1851. static ssize_t cpu_show(struct device *dev,
  1852. struct device_attribute *attr, char *buf)
  1853. {
  1854. int val;
  1855. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1856. val = drvdata->cpu;
  1857. return scnprintf(buf, PAGE_SIZE, "%d\n", val);
  1858. }
  1859. static DEVICE_ATTR_RO(cpu);
/*
 * Configuration attributes exposed in the device's top-level sysfs
 * directory: capability counts (nr_*/num*), trace configuration knobs,
 * and the index/value pairs for the various comparator banks.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
/*
 * coresight_simple_func - generate a read-only sysfs show routine plus
 * its dev_attr_<name> declaration (via DEVICE_ATTR_RO) that dumps the
 * raw 32-bit value of the register at @offset as hex.
 */
#define coresight_simple_func(name, offset) \
static ssize_t name##_show(struct device *_dev, \
			   struct device_attribute *attr, char *buf) \
{ \
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
	return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
			 readl_relaxed(drvdata->base + offset)); \
} \
DEVICE_ATTR_RO(name)
/* Raw read-only dumps of the management registers ("mgmt" group below) */
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
coresight_simple_func(trcpdsr, TRCPDSR);
coresight_simple_func(trclsr, TRCLSR);
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_simple_func(trcdevid, TRCDEVID);
coresight_simple_func(trcdevtype, TRCDEVTYPE);
coresight_simple_func(trcpidr0, TRCPIDR0);
coresight_simple_func(trcpidr1, TRCPIDR1);
coresight_simple_func(trcpidr2, TRCPIDR2);
coresight_simple_func(trcpidr3, TRCPIDR3);
/* Attributes collected under the "mgmt" sysfs sub-directory */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
/* Raw read-only dumps of the TRCIDRn ID registers ("trcidr" group below) */
coresight_simple_func(trcidr0, TRCIDR0);
coresight_simple_func(trcidr1, TRCIDR1);
coresight_simple_func(trcidr2, TRCIDR2);
coresight_simple_func(trcidr3, TRCIDR3);
coresight_simple_func(trcidr4, TRCIDR4);
coresight_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_simple_func(trcidr8, TRCIDR8);
coresight_simple_func(trcidr9, TRCIDR9);
coresight_simple_func(trcidr10, TRCIDR10);
coresight_simple_func(trcidr11, TRCIDR11);
coresight_simple_func(trcidr12, TRCIDR12);
coresight_simple_func(trcidr13, TRCIDR13);
/* Attributes collected under the "trcidr" sysfs sub-directory */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
/*
 * The three attribute groups handed to the coresight core at
 * registration time (see etm4_probe()): the unnamed configuration
 * group plus the "mgmt" and "trcidr" register-dump sub-directories.
 */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

static const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
/*
 * etm4_init_arch_data - discover and cache this trace unit's capabilities.
 *
 * @info: an etmv4_drvdata pointer, passed as void * to fit the
 *        smp_call_function_single() signature — this runs on the CPU
 *        that owns the trace unit (see etm4_probe()).
 *
 * Decodes TRCIDR0..TRCIDR5 into drvdata fields used later for sysfs
 * bounds checks and default configuration.  Register access requires
 * the unit to be unlocked, hence the CS_UNLOCK/CS_LOCK bracket.
 */
static void etm4_init_arch_data(void *info)
{
	u32 etmidr0;
	u32 etmidr1;
	u32 etmidr2;
	u32 etmidr3;
	u32 etmidr4;
	u32 etmidr5;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* find all capabilities of the tracing unit */
	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
		drvdata->instrp0 = true;
	else
		drvdata->instrp0 = false;

	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	if (BMVAL(etmidr0, 5, 5))
		drvdata->trcbb = true;
	else
		drvdata->trcbb = false;

	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	if (BMVAL(etmidr0, 6, 6))
		drvdata->trccond = true;
	else
		drvdata->trccond = false;

	/* TRCCCI, bit[7] Cycle counting instruction bit */
	if (BMVAL(etmidr0, 7, 7))
		drvdata->trccci = true;
	else
		drvdata->trccci = false;

	/* RETSTACK, bit[9] Return stack bit */
	if (BMVAL(etmidr0, 9, 9))
		drvdata->retstack = true;
	else
		drvdata->retstack = false;

	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = BMVAL(etmidr0, 10, 11);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = BMVAL(etmidr0, 15, 16);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = BMVAL(etmidr0, 24, 28);

	/* base architecture of trace unit */
	etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
	/*
	 * TRCARCHMIN, bits[7:4] architecture the minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major versin number
	 */
	drvdata->arch = BMVAL(etmidr1, 4, 11);

	/* maximum size of resources */
	etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = BMVAL(etmidr2, 25, 28);

	etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
	drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);

	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	if (BMVAL(etmidr3, 24, 24))
		drvdata->trc_error = true;
	else
		drvdata->trc_error = false;

	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
	if (BMVAL(etmidr3, 25, 25))
		drvdata->syncpr = true;
	else
		drvdata->syncpr = false;

	/* STALLCTL, bit[26] is stall control implemented? */
	if (BMVAL(etmidr3, 26, 26))
		drvdata->stallctl = true;
	else
		drvdata->stallctl = false;

	/* SYSSTALL, bit[27] implementation can support stall control? */
	if (BMVAL(etmidr3, 27, 27))
		drvdata->sysstall = true;
	else
		drvdata->sysstall = false;

	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);

	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	if (BMVAL(etmidr3, 31, 31))
		drvdata->nooverflow = true;
	else
		drvdata->nooverflow = false;

	/* number of resources trace unit supports */
	etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
	drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
	drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
	/*
	 * NUMRSPAIR, bits[19:16]
	 * The number of resource pairs conveyed by the HW starts at 0, i.e a
	 * value of 0x0 indicate 1 resource pair, 0x1 indicate two and so on.
	 * As such add 1 to the value of NUMRSPAIR for a better representation.
	 */
	drvdata->nr_resource = BMVAL(etmidr4, 16, 19) + 1;
	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator control for tracing
	 */
	drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
	drvdata->numcidc = BMVAL(etmidr4, 24, 27);
	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
	drvdata->numvmidc = BMVAL(etmidr4, 28, 31);

	etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = BMVAL(etmidr5, 16, 21);
	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	if (BMVAL(etmidr5, 22, 22))
		drvdata->atbtrig = true;
	else
		drvdata->atbtrig = false;
	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	if (BMVAL(etmidr5, 23, 23))
		drvdata->lpoverride = true;
	else
		drvdata->lpoverride = false;
	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);

	CS_LOCK(drvdata->base);
}
/*
 * etm4_init_default_data - establish a sane default configuration.
 *
 * Only the software shadow of the configuration in @drvdata is written;
 * no hardware register is touched here.  Defaults: context ID, VMID,
 * timestamp and return-stack tracing enabled; events, stalling,
 * sequencer, counters and single-shot comparators disabled; ViewInst
 * traces everything, with address comparators 0/1 preloaded to cover
 * the kernel text (_stext.._etext).
 */
static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	drvdata->pe_sel = 0x0;
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes for trace */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	/* Resource selector pair 0 is always implemented and reserved */
	drvdata->res_idx = 0x2;
	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	/* Default address range: trace the kernel text section. */
	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
	 * start at 0x20.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}
/*
 * etm4_cpu_callback - CPU hotplug notifier shared by all ETM instances.
 *
 * Keeps per-CPU trace units coherent across hotplug transitions:
 *  - CPU_STARTING (runs on the incoming CPU): unlock the OS lock once
 *    and re-program the hardware if tracing was enabled when the CPU
 *    last went down.
 *  - CPU_ONLINE: honour boot_enable for a CPU that came up after probe
 *    (sticky_enable guards against enabling twice).
 *  - CPU_DYING: quiesce the trace unit before the CPU disappears.
 */
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Nothing to do for CPUs whose ETM was never probed. */
	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm4_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		if (etmdrvdata[cpu]->enable)
			etm4_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm4_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_notifier = {
	.notifier_call = etm4_cpu_callback,
};
/*
 * etm4_probe - AMBA probe for an ETMv4 trace unit.
 *
 * Maps the device registers, discovers the unit's capabilities on its
 * owning CPU (the register reads must execute there), installs the
 * default configuration and registers with the coresight core.  The
 * first instance also registers the shared CPU hotplug notifier,
 * refcounted through etm4_count.
 */
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	/* Hold off hotplug while publishing drvdata and touching the HW. */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	/* Both calls below must run on the CPU that owns this trace unit. */
	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data,  drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (etm4_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm4_init_default_data(drvdata);

	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

	/*
	 * err_arch_supported deliberately falls through to the notifier
	 * refcount drop below; err_coresight_register skips the second
	 * pm_runtime_put() because it was already issued above.
	 */
err_arch_supported:
	pm_runtime_put(&adev->dev);
err_coresight_register:
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);
	return ret;
}
/*
 * etm4_remove - tear down a probed ETMv4 device.
 *
 * Unregisters from the coresight core; the last instance to go also
 * drops the shared CPU hotplug notifier.  Memory and the register
 * mapping are devm-managed, so no explicit release is needed here.
 */
static int etm4_remove(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);

	return 0;
}
/* AMBA peripheral IDs this driver binds to */
static struct amba_id etm4_ids[] = {
	{       /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{       /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0},	/* sentinel */
};

static struct amba_driver etm4x_driver = {
	.drv = {
		.name = "coresight-etm4x",
	},
	.probe		= etm4_probe,
	.remove		= etm4_remove,
	.id_table	= etm4_ids,
};

module_amba_driver(etm4x_driver);