coresight-etm4x.c

/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/coresight.h>
#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <asm/sections.h>

#include "coresight-etm4x.h"

static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
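
/*
 * Usage note (illustrative; the exact parameter prefix depends on how the
 * object is named in the build): with module_param_named() the parameter
 * can typically be set at boot time with "coresight-etm4x.boot_enable=1" on
 * the kernel command line when the driver is built in, or passed as
 * "boot_enable=1" when it is loaded as a module.
 */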

/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];

static void etm4_os_unlock(void *info)
{
	struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;

	/* Writing any value to ETMOSLAR unlocks the trace registers */
	writel_relaxed(0x0, drvdata->base + TRCOSLAR);
	isb();
}

static bool etm4_arch_supported(u8 arch)
{
	switch (arch) {
	case ETM_ARCH_V4:
		break;
	default:
		return false;
	}
	return true;
}

static int etm4_trace_id(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata->enable)
		return drvdata->trcid;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = readl_relaxed(drvdata->base + TRCTRACEIDR);
	trace_id &= ETM_TRACEID_MASK;
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return trace_id;
}
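
/*
 * Note on the sequence implemented below: the trace unit is first disabled
 * through TRCPRGCTLR and the driver waits for TRCSTATR.IDLE to be set
 * before any configuration register is written; once programming is
 * complete the unit is re-enabled and the driver waits for IDLE to clear
 * again. This appears to follow the usual ETMv4 programming model of
 * toggling the programming enable bit and polling the status register.
 */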

static void etm4_enable_hw(void *info)
{
	int i;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	etm4_os_unlock(drvdata);

	/* Disable the trace unit before programming trace registers */
	writel_relaxed(0, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go up */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
	writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
	/* nothing specific implemented */
	writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
	writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
	writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
	writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
	writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
	writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
	writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
	writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
	writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
	writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
	writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
	writel_relaxed(drvdata->vissctlr,
		       drvdata->base + TRCVISSCTLR);
	writel_relaxed(drvdata->vipcssctlr,
		       drvdata->base + TRCVIPCSSCTLR);
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		writel_relaxed(drvdata->seq_ctrl[i],
			       drvdata->base + TRCSEQEVRn(i));
	writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
	writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
	writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
	for (i = 0; i < drvdata->nr_cntr; i++) {
		writel_relaxed(drvdata->cntrldvr[i],
			       drvdata->base + TRCCNTRLDVRn(i));
		writel_relaxed(drvdata->cntr_ctrl[i],
			       drvdata->base + TRCCNTCTLRn(i));
		writel_relaxed(drvdata->cntr_val[i],
			       drvdata->base + TRCCNTVRn(i));
	}
	for (i = 0; i < drvdata->nr_resource; i++)
		writel_relaxed(drvdata->res_ctrl[i],
			       drvdata->base + TRCRSCTLRn(i));

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		writel_relaxed(drvdata->ss_ctrl[i],
			       drvdata->base + TRCSSCCRn(i));
		writel_relaxed(drvdata->ss_status[i],
			       drvdata->base + TRCSSCSRn(i));
		writel_relaxed(drvdata->ss_pe_cmp[i],
			       drvdata->base + TRCSSPCICRn(i));
	}
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		writeq_relaxed(drvdata->addr_val[i],
			       drvdata->base + TRCACVRn(i));
		writeq_relaxed(drvdata->addr_acc[i],
			       drvdata->base + TRCACATRn(i));
	}
	for (i = 0; i < drvdata->numcidc; i++)
		writeq_relaxed(drvdata->ctxid_pid[i],
			       drvdata->base + TRCCIDCVRn(i));
	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);

	for (i = 0; i < drvdata->numvmidc; i++)
		writeq_relaxed(drvdata->vmid_val[i],
			       drvdata->base + TRCVMIDCVRn(i));
	writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
	writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);

	/* Enable the trace unit */
	writel_relaxed(1, drvdata->base + TRCPRGCTLR);

	/* wait for TRCSTATR.IDLE to go back down to '0' */
	if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 0))
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n",
			TRCSTATR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}

static int etm4_enable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	pm_runtime_get_sync(drvdata->dev);
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
	 * ensures that register writes occur when cpu is powered.
	 */
	ret = smp_call_function_single(drvdata->cpu,
				       etm4_enable_hw, drvdata, 1);
	if (ret)
		goto err;
	drvdata->enable = true;
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(drvdata->dev);
	return ret;
}

static void etm4_disable_hw(void *info)
{
	u32 control;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	control = readl_relaxed(drvdata->base + TRCPRGCTLR);

	/* EN, bit[0] Trace unit enable bit */
	control &= ~0x1;

	/* make sure everything completes before disabling */
	mb();
	isb();
	writel_relaxed(control, drvdata->base + TRCPRGCTLR);

	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}

static void etm4_disable(struct coresight_device *csdev)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	pm_runtime_put(drvdata->dev);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}

static const struct coresight_ops_source etm4_source_ops = {
	.trace_id	= etm4_trace_id,
	.enable		= etm4_enable,
	.disable	= etm4_disable,
};

static const struct coresight_ops etm4_cs_ops = {
	.source_ops	= &etm4_source_ops,
};
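
/*
 * Illustrative usage (an assumption about the coresight core in this kernel,
 * not something defined in this file): if the core exposes the enable_source
 * attribute, the source_ops above are invoked when tracing is started or
 * stopped from user space, e.g.:
 *
 *	echo 1 > /sys/bus/coresight/devices/<etm-device>/enable_source
 *	echo 0 > /sys/bus/coresight/devices/<etm-device>/enable_source
 *
 * The device name under /sys/bus/coresight/devices/ is platform specific.
 */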

static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx = drvdata->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			drvdata->viiectlr |= BIT(idx / 2 + 16);
			drvdata->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			drvdata->viiectlr |= BIT(idx / 2);
			drvdata->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}
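
/*
 * Worked example for the bit layout used above (derived from the code
 * itself): address comparator pair n maps to include bit n and exclude bit
 * n + 16 of TRCVIIECTLR. With addr_idx == 2 (pair 1), excluding the range
 * sets bit 17 and clears bit 1; including it sets bit 1 and clears bit 17.
 */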

static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		drvdata->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	drvdata->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	drvdata->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* Disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* Disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		drvdata->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	drvdata->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp == true) {
		drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		drvdata->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* Disable external input events */
	drvdata->ext_inp = 0x0;

	drvdata->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	drvdata->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	drvdata->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		drvdata->addr_val[i] = 0x0;
		drvdata->addr_acc[i] = 0x0;
		drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	drvdata->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	drvdata->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_WO(reset);
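
/*
 * Illustrative example (the sysfs path is an assumption; the device name
 * under /sys/bus/coresight/devices/ varies by platform): writing to the
 * "reset" file restores the default configuration set up above, e.g.:
 *
 *	echo 1 > /sys/bus/coresight/devices/<etm-device>/reset
 */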

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->mode = val & ETMv4_MODE_ALL;

	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		drvdata->cfg &= ~(BIT(1) | BIT(2));
		if (drvdata->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			drvdata->cfg |= BIT(1);
		if (drvdata->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			drvdata->cfg |= BIT(2);
		if (drvdata->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			drvdata->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		drvdata->cfg |= BIT(3);
	else
		drvdata->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		drvdata->cfg |= BIT(4);
	else
		drvdata->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		drvdata->cfg |= BIT(6);
	else
		drvdata->cfg &= ~BIT(6);

	if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		drvdata->cfg |= BIT(7);
	else
		drvdata->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(drvdata->mode);
	if (drvdata->trccond == true) {
		drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		drvdata->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		drvdata->cfg |= BIT(11);
	else
		drvdata->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		drvdata->cfg |= BIT(12);
	else
		drvdata->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(drvdata->mode);
	/* start by clearing QE bits */
	drvdata->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		drvdata->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		drvdata->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		drvdata->eventctrl1 |= BIT(11);
	else
		drvdata->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		drvdata->eventctrl1 |= BIT(12);
	else
		drvdata->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (drvdata->mode & ETM_MODE_ISTALL_EN)
		drvdata->stall_ctrl |= BIT(8);
	else
		drvdata->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (drvdata->mode & ETM_MODE_INSTPRIO)
		drvdata->stall_ctrl |= BIT(10);
	else
		drvdata->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		drvdata->stall_ctrl |= BIT(13);
	else
		drvdata->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
		drvdata->vinst_ctrl |= BIT(9);
	else
		drvdata->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (drvdata->mode & ETM_MODE_TRACE_RESET)
		drvdata->vinst_ctrl |= BIT(10);
	else
		drvdata->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		drvdata->vinst_ctrl |= BIT(11);
	else
		drvdata->vinst_ctrl &= ~BIT(11);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(mode);
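
/*
 * Illustrative example (the placeholder mask and sysfs path are assumptions,
 * not taken from this file): the "mode" file accepts a hexadecimal bitmask
 * built from the ETM_MODE_* / ETMv4_MODE_* definitions in coresight-etm4x.h,
 * e.g.:
 *
 *	echo <hex mode mask> > /sys/bus/coresight/devices/<etm-device>/mode
 *
 * mode_store() silently drops requests the hardware cannot honour, for
 * instance cycle-accurate tracing when trccci is not set.
 */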

static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	drvdata->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		drvdata->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		drvdata->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		drvdata->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		drvdata->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);

static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = BMVAL(drvdata->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		drvdata->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		drvdata->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	drvdata->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);

static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;
	/*
	 * Bit[7:0] selects which address range comparator is used for
	 * branch broadcast control.
	 */
	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
		return -EINVAL;

	drvdata->bb_ctrl = val;
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	drvdata->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);

static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = BMVAL(drvdata->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	drvdata->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(drvdata->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	drvdata->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	val = BMVAL(drvdata->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	idx = drvdata->addr_idx;
	spin_lock(&drvdata->spinlock);
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)drvdata->addr_val[idx];
	val2 = (unsigned long)drvdata->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val1;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	drvdata->addr_val[idx + 1] = (u64)val2;
	drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (drvdata->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
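
/*
 * Illustrative example (addresses and sysfs path are placeholder
 * assumptions): an address range filter is programmed by first selecting an
 * even-numbered comparator with addr_idx and then writing the low and high
 * addresses to addr_range, e.g.:
 *
 *	echo 0 > /sys/bus/coresight/devices/<etm-device>/addr_idx
 *	echo <low addr> <high addr> > \
 *		/sys/bus/coresight/devices/<etm-device>/addr_range
 */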

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
	drvdata->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)drvdata->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	drvdata->addr_val[idx] = (u64)val;
	drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	drvdata->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	drvdata->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
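
/*
 * Note on the two stores above (bit positions taken from the code): a start
 * comparator sets bit idx of TRCVISSCTLR while a stop comparator sets bit
 * idx + 16, e.g. comparator 1 drives bit 1 when used as a start address and
 * bit 17 when used as a stop address. Both paths also set SSSTATUS
 * (TRCVICTLR bit[9]) to turn on the start/stop logic.
 */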
  1136. static ssize_t addr_ctxtype_show(struct device *dev,
  1137. struct device_attribute *attr,
  1138. char *buf)
  1139. {
  1140. ssize_t len;
  1141. u8 idx, val;
  1142. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1143. spin_lock(&drvdata->spinlock);
  1144. idx = drvdata->addr_idx;
  1145. /* CONTEXTTYPE, bits[3:2] */
  1146. val = BMVAL(drvdata->addr_acc[idx], 2, 3);
  1147. len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
  1148. (val == ETM_CTX_CTXID ? "ctxid" :
  1149. (val == ETM_CTX_VMID ? "vmid" : "all")));
  1150. spin_unlock(&drvdata->spinlock);
  1151. return len;
  1152. }
  1153. static ssize_t addr_ctxtype_store(struct device *dev,
  1154. struct device_attribute *attr,
  1155. const char *buf, size_t size)
  1156. {
  1157. u8 idx;
  1158. char str[10] = "";
  1159. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1160. if (strlen(buf) >= 10)
  1161. return -EINVAL;
  1162. if (sscanf(buf, "%s", str) != 1)
  1163. return -EINVAL;
  1164. spin_lock(&drvdata->spinlock);
  1165. idx = drvdata->addr_idx;
  1166. if (!strcmp(str, "none"))
  1167. /* start by clearing context type bits */
  1168. drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
  1169. else if (!strcmp(str, "ctxid")) {
  1170. /* 0b01 The trace unit performs a Context ID */
  1171. if (drvdata->numcidc) {
  1172. drvdata->addr_acc[idx] |= BIT(2);
  1173. drvdata->addr_acc[idx] &= ~BIT(3);
  1174. }
  1175. } else if (!strcmp(str, "vmid")) {
  1176. /* 0b10 The trace unit performs a VMID */
  1177. if (drvdata->numvmidc) {
  1178. drvdata->addr_acc[idx] &= ~BIT(2);
  1179. drvdata->addr_acc[idx] |= BIT(3);
  1180. }
  1181. } else if (!strcmp(str, "all")) {
  1182. /*
  1183. * 0b11 The trace unit performs a Context ID
  1184. * comparison and a VMID
  1185. */
  1186. if (drvdata->numcidc)
  1187. drvdata->addr_acc[idx] |= BIT(2);
  1188. if (drvdata->numvmidc)
  1189. drvdata->addr_acc[idx] |= BIT(3);
  1190. }
  1191. spin_unlock(&drvdata->spinlock);
  1192. return size;
  1193. }
  1194. static DEVICE_ATTR_RW(addr_ctxtype);

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(drvdata->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->addr_idx;
	/* clear context ID comparator bits[6:4] */
	drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	drvdata->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	drvdata->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->seq_idx;
	val = drvdata->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->seq_idx;
	/* RST, bits[7:0] */
	drvdata->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	drvdata->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	val = drvdata->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->cntr_idx;
	drvdata->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
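
/*
 * Illustrative usage sketch: the counter attributes above are index based,
 * so a user space sequence along these lines would program counter 0 (the
 * sysfs directory is platform dependent and only shown here as an example
 * under /sys/bus/coresight/devices/<etm-device>/):
 *
 *   echo 0x0  > cntr_idx	select counter 0
 *   echo 0x10 > cntrldvr	reload value
 *   echo 0x10 > cntr_val	initial counter value
 *   echo 0x1  > cntr_ctrl	counter control setting
 *
 * All later cntrldvr/cntr_val/cntr_ctrl accesses operate on the counter
 * selected by cntr_idx, under drvdata->spinlock.
 */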

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	val = drvdata->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = drvdata->res_idx;
	/* For an odd idx the pair inversion bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	drvdata->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
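
/*
 * Illustrative note on the PAIRINV handling above: resource selectors are
 * organised in pairs and the store routine treats bit[21] as RES0 for an
 * odd index, so only the even member of a pair can set PAIRINV. For
 * example, with res_idx set to 3, writing 0x200001 to res_ctrl stores 0x1
 * because bit[21] is cleared before the value is kept.
 */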

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	val = (unsigned long)drvdata->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	idx = drvdata->ctxid_idx;
	drvdata->ctxid_pid[idx] = (u64)pid;
	drvdata->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val1 = drvdata->ctxid_mask0;
	val2 = drvdata->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		drvdata->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		drvdata->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		drvdata->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		drvdata->ctxid_mask0 = val1;
		drvdata->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise the behavior is
	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1 (mask
	 * byte 0, which controls comparator 0), bits[31:24] of the ctxid
	 * comparator 0 value register (its byte 3) must be cleared.
	 */
	mask = drvdata->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				drvdata->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = drvdata->ctxid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
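
/*
 * Illustrative walk-through of the mask handling above, assuming two
 * context ID comparators (numcidc == 2):
 *
 *   echo "0xff03 0x0" > ctxid_masks
 *
 * stores ctxid_mask0 = 0xff03. Mask byte 0 (0x03) belongs to comparator 0,
 * so bits 0 and 1 clear bytes 0 and 1 of the comparator 0 value; mask
 * byte 1 (0xff) belongs to comparator 1 and clears all eight bytes of the
 * comparator 1 value, which is exactly what the nested loops perform.
 */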

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	drvdata->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	spin_lock(&drvdata->spinlock);
	val1 = drvdata->vmid_mask0;
	val2 = drvdata->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		drvdata->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		drvdata->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		drvdata->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		drvdata->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		drvdata->vmid_mask0 = val1;
		drvdata->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		drvdata->vmid_mask0 = val1;
		drvdata->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		drvdata->vmid_mask0 = val1;
		drvdata->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		drvdata->vmid_mask0 = val1;
		drvdata->vmid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise the behavior is
	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1 (mask
	 * byte 0, which controls comparator 0), bits[31:24] of the vmid
	 * comparator 0 value register (its byte 3) must be cleared.
	 */
	mask = drvdata->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				drvdata->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = drvdata->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
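
/*
 * coresight_simple_func() expands, for a given register offset, into a
 * read-only sysfs show routine that dumps the raw register value with
 * readl_relaxed(), plus the matching DEVICE_ATTR_RO() declaration. It is
 * used below for the management ("mgmt") and ID ("trcidr") register
 * groups.
 */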

#define coresight_simple_func(name, offset)				\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
DEVICE_ATTR_RO(name)

coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
coresight_simple_func(trcpdsr, TRCPDSR);
coresight_simple_func(trclsr, TRCLSR);
coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_simple_func(trcdevid, TRCDEVID);
coresight_simple_func(trcdevtype, TRCDEVTYPE);
coresight_simple_func(trcpidr0, TRCPIDR0);
coresight_simple_func(trcpidr1, TRCPIDR1);
coresight_simple_func(trcpidr2, TRCPIDR2);
coresight_simple_func(trcpidr3, TRCPIDR3);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_simple_func(trcidr0, TRCIDR0);
coresight_simple_func(trcidr1, TRCIDR1);
coresight_simple_func(trcidr2, TRCIDR2);
coresight_simple_func(trcidr3, TRCIDR3);
coresight_simple_func(trcidr4, TRCIDR4);
coresight_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_simple_func(trcidr8, TRCIDR8);
coresight_simple_func(trcidr9, TRCIDR9);
coresight_simple_func(trcidr10, TRCIDR10);
coresight_simple_func(trcidr11, TRCIDR11);
coresight_simple_func(trcidr12, TRCIDR12);
coresight_simple_func(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

static const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
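
/*
 * Illustrative layout: the three groups above surface as attribute files
 * directly in the device's sysfs directory, in a "mgmt" subdirectory and
 * in a "trcidr" subdirectory, typically under /sys/bus/coresight/devices/
 * (the exact device name is platform dependent), e.g.:
 *
 *   .../<etm-device>/addr_idx
 *   .../<etm-device>/mgmt/trcauthstatus
 *   .../<etm-device>/trcidr/trcidr0
 */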

static void etm4_init_arch_data(void *info)
{
	u32 etmidr0;
	u32 etmidr1;
	u32 etmidr2;
	u32 etmidr3;
	u32 etmidr4;
	u32 etmidr5;
	struct etmv4_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* find all capabilities of the tracing unit */
	etmidr0 = readl_relaxed(drvdata->base + TRCIDR0);

	/* INSTP0, bits[2:1] P0 tracing support field */
	if (BMVAL(etmidr0, 1, 1) && BMVAL(etmidr0, 2, 2))
		drvdata->instrp0 = true;
	else
		drvdata->instrp0 = false;

	/* TRCBB, bit[5] Branch broadcast tracing support bit */
	if (BMVAL(etmidr0, 5, 5))
		drvdata->trcbb = true;
	else
		drvdata->trcbb = false;

	/* TRCCOND, bit[6] Conditional instruction tracing support bit */
	if (BMVAL(etmidr0, 6, 6))
		drvdata->trccond = true;
	else
		drvdata->trccond = false;

	/* TRCCCI, bit[7] Cycle counting instruction bit */
	if (BMVAL(etmidr0, 7, 7))
		drvdata->trccci = true;
	else
		drvdata->trccci = false;

	/* RETSTACK, bit[9] Return stack bit */
	if (BMVAL(etmidr0, 9, 9))
		drvdata->retstack = true;
	else
		drvdata->retstack = false;

	/* NUMEVENT, bits[11:10] Number of events field */
	drvdata->nr_event = BMVAL(etmidr0, 10, 11);
	/* QSUPP, bits[16:15] Q element support field */
	drvdata->q_support = BMVAL(etmidr0, 15, 16);
	/* TSSIZE, bits[28:24] Global timestamp size field */
	drvdata->ts_size = BMVAL(etmidr0, 24, 28);

	/* base architecture of trace unit */
	etmidr1 = readl_relaxed(drvdata->base + TRCIDR1);
	/*
	 * TRCARCHMIN, bits[7:4] architecture minor version number
	 * TRCARCHMAJ, bits[11:8] architecture major version number
	 */
	drvdata->arch = BMVAL(etmidr1, 4, 11);

	/* maximum size of resources */
	etmidr2 = readl_relaxed(drvdata->base + TRCIDR2);
	/* CIDSIZE, bits[9:5] Indicates the Context ID size */
	drvdata->ctxid_size = BMVAL(etmidr2, 5, 9);
	/* VMIDSIZE, bits[14:10] Indicates the VMID size */
	drvdata->vmid_size = BMVAL(etmidr2, 10, 14);
	/* CCSIZE, bits[28:25] size of the cycle counter in bits minus 12 */
	drvdata->ccsize = BMVAL(etmidr2, 25, 28);

	etmidr3 = readl_relaxed(drvdata->base + TRCIDR3);
	/* CCITMIN, bits[11:0] minimum threshold value that can be programmed */
	drvdata->ccitmin = BMVAL(etmidr3, 0, 11);
	/* EXLEVEL_S, bits[19:16] Secure state instruction tracing */
	drvdata->s_ex_level = BMVAL(etmidr3, 16, 19);
	/* EXLEVEL_NS, bits[23:20] Non-secure state instruction tracing */
	drvdata->ns_ex_level = BMVAL(etmidr3, 20, 23);

	/*
	 * TRCERR, bit[24] whether a trace unit can trace a
	 * system error exception.
	 */
	if (BMVAL(etmidr3, 24, 24))
		drvdata->trc_error = true;
	else
		drvdata->trc_error = false;

	/* SYNCPR, bit[25] implementation has a fixed synchronization period? */
	if (BMVAL(etmidr3, 25, 25))
		drvdata->syncpr = true;
	else
		drvdata->syncpr = false;

	/* STALLCTL, bit[26] is stall control implemented? */
	if (BMVAL(etmidr3, 26, 26))
		drvdata->stallctl = true;
	else
		drvdata->stallctl = false;

	/* SYSSTALL, bit[27] implementation can support stall control? */
	if (BMVAL(etmidr3, 27, 27))
		drvdata->sysstall = true;
	else
		drvdata->sysstall = false;

	/* NUMPROC, bits[30:28] the number of PEs available for tracing */
	drvdata->nr_pe = BMVAL(etmidr3, 28, 30);

	/* NOOVERFLOW, bit[31] is trace overflow prevention supported */
	if (BMVAL(etmidr3, 31, 31))
		drvdata->nooverflow = true;
	else
		drvdata->nooverflow = false;

	/* number of resources trace unit supports */
	etmidr4 = readl_relaxed(drvdata->base + TRCIDR4);
	/* NUMACPAIRS, bits[3:0] number of addr comparator pairs for tracing */
	drvdata->nr_addr_cmp = BMVAL(etmidr4, 0, 3);
	/* NUMPC, bits[15:12] number of PE comparator inputs for tracing */
	drvdata->nr_pe_cmp = BMVAL(etmidr4, 12, 15);
	/* NUMRSPAIR, bits[19:16] the number of resource pairs for tracing */
	drvdata->nr_resource = BMVAL(etmidr4, 16, 19);
	/*
	 * NUMSSCC, bits[23:20] the number of single-shot
	 * comparator control for tracing
	 */
	drvdata->nr_ss_cmp = BMVAL(etmidr4, 20, 23);
	/* NUMCIDC, bits[27:24] number of Context ID comparators for tracing */
	drvdata->numcidc = BMVAL(etmidr4, 24, 27);
	/* NUMVMIDC, bits[31:28] number of VMID comparators for tracing */
	drvdata->numvmidc = BMVAL(etmidr4, 28, 31);

	etmidr5 = readl_relaxed(drvdata->base + TRCIDR5);
	/* NUMEXTIN, bits[8:0] number of external inputs implemented */
	drvdata->nr_ext_inp = BMVAL(etmidr5, 0, 8);
	/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
	drvdata->trcid_size = BMVAL(etmidr5, 16, 21);

	/* ATBTRIG, bit[22] implementation can support ATB triggers? */
	if (BMVAL(etmidr5, 22, 22))
		drvdata->atbtrig = true;
	else
		drvdata->atbtrig = false;

	/*
	 * LPOVERRIDE, bit[23] implementation supports
	 * low-power state override
	 */
	if (BMVAL(etmidr5, 23, 23))
		drvdata->lpoverride = true;
	else
		drvdata->lpoverride = false;

	/* NUMSEQSTATE, bits[27:25] number of sequencer states implemented */
	drvdata->nrseqstate = BMVAL(etmidr5, 25, 27);
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
	CS_LOCK(drvdata->base);
}
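
/*
 * Worked example for the BMVAL() extractions above: BMVAL(val, lsb, msb)
 * returns the inclusive bit field [msb:lsb] of val shifted down to bit 0
 * (the helper comes from coresight-priv.h). If TRCIDR4 were to read back
 * as 0x11240004:
 *
 *   nr_addr_cmp = BMVAL(0x11240004, 0, 3)   = 0x4  address comparator pairs
 *   nr_resource = BMVAL(0x11240004, 16, 19) = 0x4  resource selector pairs
 *   numcidc     = BMVAL(0x11240004, 24, 27) = 0x1  Context ID comparator
 *   numvmidc    = BMVAL(0x11240004, 28, 31) = 0x1  VMID comparator
 */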

static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
{
	int i;

	drvdata->pe_sel = 0x0;
	drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
			ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);

	/* disable all events tracing */
	drvdata->eventctrl0 = 0x0;
	drvdata->eventctrl1 = 0x0;

	/* disable stalling */
	drvdata->stall_ctrl = 0x0;

	/* disable timestamp event */
	drvdata->ts_ctrl = 0x0;

	/* enable trace synchronization every 4096 bytes for trace */
	if (!drvdata->syncpr)
		drvdata->syncfreq = 0xC;

	/*
	 * enable viewInst to trace everything with start-stop logic in
	 * started state
	 */
	drvdata->vinst_ctrl |= BIT(0);
	/* set initial state of start-stop logic */
	if (drvdata->nr_addr_cmp)
		drvdata->vinst_ctrl |= BIT(9);

	/* no address range filtering for ViewInst */
	drvdata->viiectlr = 0x0;
	/* no start-stop filtering for ViewInst */
	drvdata->vissctlr = 0x0;

	/* disable seq events */
	for (i = 0; i < drvdata->nrseqstate - 1; i++)
		drvdata->seq_ctrl[i] = 0x0;
	drvdata->seq_rst = 0x0;
	drvdata->seq_state = 0x0;

	/* disable external input events */
	drvdata->ext_inp = 0x0;

	for (i = 0; i < drvdata->nr_cntr; i++) {
		drvdata->cntrldvr[i] = 0x0;
		drvdata->cntr_ctrl[i] = 0x0;
		drvdata->cntr_val[i] = 0x0;
	}

	for (i = 2; i < drvdata->nr_resource * 2; i++)
		drvdata->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		drvdata->ss_ctrl[i] = 0x0;
		drvdata->ss_pe_cmp[i] = 0x0;
	}

	if (drvdata->nr_addr_cmp >= 1) {
		drvdata->addr_val[0] = (unsigned long)_stext;
		drvdata->addr_val[1] = (unsigned long)_etext;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	for (i = 0; i < drvdata->numcidc; i++) {
		drvdata->ctxid_pid[i] = 0x0;
		drvdata->ctxid_vpid[i] = 0x0;
	}

	drvdata->ctxid_mask0 = 0x0;
	drvdata->ctxid_mask1 = 0x0;

	for (i = 0; i < drvdata->numvmidc; i++)
		drvdata->vmid_val[i] = 0x0;
	drvdata->vmid_mask0 = 0x0;
	drvdata->vmid_mask1 = 0x0;

	/*
	 * A trace ID value of 0 is invalid, so let's start at some
	 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
	 * start at 0x20.
	 */
	drvdata->trcid = 0x20 + drvdata->cpu;
}

static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (!etmdrvdata[cpu]->os_unlock) {
			etm4_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		if (etmdrvdata[cpu]->enable)
			etm4_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm4_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm4_cpu_notifier = {
	.notifier_call = etm4_cpu_callback,
};

static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etmv4_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->cpu = pdata ? pdata->cpu : 0;

	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	if (smp_call_function_single(drvdata->cpu,
				etm4_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	if (!etm4_count++)
		register_hotcpu_notifier(&etm4_cpu_notifier);

	put_online_cpus();

	if (!etm4_arch_supported(drvdata->arch)) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm4_init_default_data(drvdata);

	pm_runtime_put(&adev->dev);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm4_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etmv4_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_coresight_register;
	}

	dev_info(dev, "%s initialized\n", (char *)id->data);

	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	pm_runtime_put(&adev->dev);
err_coresight_register:
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);
	return ret;
}
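
/*
 * Usage note: a successful probe only registers the tracer, it does not
 * turn it on. Tracing starts either at boot when the boot_enable module
 * parameter is set (handled at the end of etm4_probe() above) or later
 * from user space through the CoreSight core, typically by writing 1 to
 * the device's enable_source attribute once a sink has been enabled; the
 * exact sysfs paths are platform dependent.
 */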

static int etm4_remove(struct amba_device *adev)
{
	struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm4_count == 0)
		unregister_hotcpu_notifier(&etm4_cpu_notifier);

	return 0;
}

static struct amba_id etm4_ids[] = {
	{       /* ETM 4.0 - Qualcomm */
		.id	= 0x0003b95d,
		.mask	= 0x0003ffff,
		.data	= "ETM 4.0",
	},
	{       /* ETM 4.0 - Juno board */
		.id	= 0x000bb95e,
		.mask	= 0x000fffff,
		.data	= "ETM 4.0",
	},
	{ 0, 0},
};
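
/*
 * The AMBA bus matches the peripheral ID read from a component's
 * CoreSight ID registers against each entry's .id under .mask, so the
 * two entries above bind this driver to the Qualcomm and Juno ETMv4
 * implementations named in their comments.
 */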

static struct amba_driver etm4x_driver = {
	.drv = {
		.name   = "coresight-etm4x",
	},
	.probe		= etm4_probe,
	.remove		= etm4_remove,
	.id_table	= etm4_ids,
};

module_amba_driver(etm4x_driver);