coresight-etm4x-sysfs.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
  17. #include <linux/pm_runtime.h>
  18. #include <linux/sysfs.h>
  19. #include "coresight-etm4x.h"
  20. #include "coresight-priv.h"
  21. static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  22. {
  23. u8 idx;
  24. struct etmv4_config *config = &drvdata->config;
  25. idx = config->addr_idx;
  26. /*
  27. * TRCACATRn.TYPE bit[1:0]: type of comparison
  28. * the trace unit performs
  29. */
  30. if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  31. if (idx % 2 != 0)
  32. return -EINVAL;
  33. /*
  34. * We are performing instruction address comparison. Set the
  35. * relevant bit of ViewInst Include/Exclude Control register
  36. * for corresponding address comparator pair.
  37. */
  38. if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  39. config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  40. return -EINVAL;
  41. if (exclude == true) {
  42. /*
  43. * Set exclude bit and unset the include bit
  44. * corresponding to comparator pair
  45. */
  46. config->viiectlr |= BIT(idx / 2 + 16);
  47. config->viiectlr &= ~BIT(idx / 2);
  48. } else {
  49. /*
  50. * Set include bit and unset exclude bit
  51. * corresponding to comparator pair
  52. */
  53. config->viiectlr |= BIT(idx / 2);
  54. config->viiectlr &= ~BIT(idx / 2 + 16);
  55. }
  56. }
  57. return 0;
  58. }
  59. static ssize_t nr_pe_cmp_show(struct device *dev,
  60. struct device_attribute *attr,
  61. char *buf)
  62. {
  63. unsigned long val;
  64. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  65. val = drvdata->nr_pe_cmp;
  66. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  67. }
  68. static DEVICE_ATTR_RO(nr_pe_cmp);
  69. static ssize_t nr_addr_cmp_show(struct device *dev,
  70. struct device_attribute *attr,
  71. char *buf)
  72. {
  73. unsigned long val;
  74. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  75. val = drvdata->nr_addr_cmp;
  76. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  77. }
  78. static DEVICE_ATTR_RO(nr_addr_cmp);
  79. static ssize_t nr_cntr_show(struct device *dev,
  80. struct device_attribute *attr,
  81. char *buf)
  82. {
  83. unsigned long val;
  84. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  85. val = drvdata->nr_cntr;
  86. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  87. }
  88. static DEVICE_ATTR_RO(nr_cntr);
  89. static ssize_t nr_ext_inp_show(struct device *dev,
  90. struct device_attribute *attr,
  91. char *buf)
  92. {
  93. unsigned long val;
  94. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  95. val = drvdata->nr_ext_inp;
  96. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  97. }
  98. static DEVICE_ATTR_RO(nr_ext_inp);
  99. static ssize_t numcidc_show(struct device *dev,
  100. struct device_attribute *attr,
  101. char *buf)
  102. {
  103. unsigned long val;
  104. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  105. val = drvdata->numcidc;
  106. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  107. }
  108. static DEVICE_ATTR_RO(numcidc);
  109. static ssize_t numvmidc_show(struct device *dev,
  110. struct device_attribute *attr,
  111. char *buf)
  112. {
  113. unsigned long val;
  114. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  115. val = drvdata->numvmidc;
  116. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  117. }
  118. static DEVICE_ATTR_RO(numvmidc);
  119. static ssize_t nrseqstate_show(struct device *dev,
  120. struct device_attribute *attr,
  121. char *buf)
  122. {
  123. unsigned long val;
  124. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  125. val = drvdata->nrseqstate;
  126. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  127. }
  128. static DEVICE_ATTR_RO(nrseqstate);
  129. static ssize_t nr_resource_show(struct device *dev,
  130. struct device_attribute *attr,
  131. char *buf)
  132. {
  133. unsigned long val;
  134. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  135. val = drvdata->nr_resource;
  136. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  137. }
  138. static DEVICE_ATTR_RO(nr_resource);
  139. static ssize_t nr_ss_cmp_show(struct device *dev,
  140. struct device_attribute *attr,
  141. char *buf)
  142. {
  143. unsigned long val;
  144. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  145. val = drvdata->nr_ss_cmp;
  146. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  147. }
  148. static DEVICE_ATTR_RO(nr_ss_cmp);
  149. static ssize_t reset_store(struct device *dev,
  150. struct device_attribute *attr,
  151. const char *buf, size_t size)
  152. {
  153. int i;
  154. unsigned long val;
  155. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  156. struct etmv4_config *config = &drvdata->config;
  157. if (kstrtoul(buf, 16, &val))
  158. return -EINVAL;
  159. spin_lock(&drvdata->spinlock);
  160. if (val)
  161. config->mode = 0x0;
  162. /* Disable data tracing: do not trace load and store data transfers */
  163. config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
  164. config->cfg &= ~(BIT(1) | BIT(2));
  165. /* Disable data value and data address tracing */
  166. config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
  167. ETM_MODE_DATA_TRACE_VAL);
  168. config->cfg &= ~(BIT(16) | BIT(17));
  169. /* Disable all events tracing */
  170. config->eventctrl0 = 0x0;
  171. config->eventctrl1 = 0x0;
  172. /* Disable timestamp event */
  173. config->ts_ctrl = 0x0;
  174. /* Disable stalling */
  175. config->stall_ctrl = 0x0;
  176. /* Reset trace synchronization period to 2^8 = 256 bytes*/
  177. if (drvdata->syncpr == false)
  178. config->syncfreq = 0x8;
  179. /*
  180. * Enable ViewInst to trace everything with start-stop logic in
  181. * started state. ARM recommends start-stop logic is set before
  182. * each trace run.
  183. */
  184. config->vinst_ctrl |= BIT(0);
  185. if (drvdata->nr_addr_cmp == true) {
  186. config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
  187. /* SSSTATUS, bit[9] */
  188. config->vinst_ctrl |= BIT(9);
  189. }
  190. /* No address range filtering for ViewInst */
  191. config->viiectlr = 0x0;
  192. /* No start-stop filtering for ViewInst */
  193. config->vissctlr = 0x0;
  194. /* Disable seq events */
  195. for (i = 0; i < drvdata->nrseqstate-1; i++)
  196. config->seq_ctrl[i] = 0x0;
  197. config->seq_rst = 0x0;
  198. config->seq_state = 0x0;
  199. /* Disable external input events */
  200. config->ext_inp = 0x0;
  201. config->cntr_idx = 0x0;
  202. for (i = 0; i < drvdata->nr_cntr; i++) {
  203. config->cntrldvr[i] = 0x0;
  204. config->cntr_ctrl[i] = 0x0;
  205. config->cntr_val[i] = 0x0;
  206. }
  207. config->res_idx = 0x0;
  208. for (i = 0; i < drvdata->nr_resource; i++)
  209. config->res_ctrl[i] = 0x0;
  210. for (i = 0; i < drvdata->nr_ss_cmp; i++) {
  211. config->ss_ctrl[i] = 0x0;
  212. config->ss_pe_cmp[i] = 0x0;
  213. }
  214. config->addr_idx = 0x0;
  215. for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
  216. config->addr_val[i] = 0x0;
  217. config->addr_acc[i] = 0x0;
  218. config->addr_type[i] = ETM_ADDR_TYPE_NONE;
  219. }
  220. config->ctxid_idx = 0x0;
  221. for (i = 0; i < drvdata->numcidc; i++) {
  222. config->ctxid_pid[i] = 0x0;
  223. config->ctxid_vpid[i] = 0x0;
  224. }
  225. config->ctxid_mask0 = 0x0;
  226. config->ctxid_mask1 = 0x0;
  227. config->vmid_idx = 0x0;
  228. for (i = 0; i < drvdata->numvmidc; i++)
  229. config->vmid_val[i] = 0x0;
  230. config->vmid_mask0 = 0x0;
  231. config->vmid_mask1 = 0x0;
  232. drvdata->trcid = drvdata->cpu + 1;
  233. spin_unlock(&drvdata->spinlock);
  234. return size;
  235. }
  236. static DEVICE_ATTR_WO(reset);
  237. static ssize_t mode_show(struct device *dev,
  238. struct device_attribute *attr,
  239. char *buf)
  240. {
  241. unsigned long val;
  242. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  243. struct etmv4_config *config = &drvdata->config;
  244. val = config->mode;
  245. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  246. }
  247. static ssize_t mode_store(struct device *dev,
  248. struct device_attribute *attr,
  249. const char *buf, size_t size)
  250. {
  251. unsigned long val, mode;
  252. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  253. struct etmv4_config *config = &drvdata->config;
  254. if (kstrtoul(buf, 16, &val))
  255. return -EINVAL;
  256. spin_lock(&drvdata->spinlock);
  257. config->mode = val & ETMv4_MODE_ALL;
  258. if (config->mode & ETM_MODE_EXCLUDE)
  259. etm4_set_mode_exclude(drvdata, true);
  260. else
  261. etm4_set_mode_exclude(drvdata, false);
  262. if (drvdata->instrp0 == true) {
  263. /* start by clearing instruction P0 field */
  264. config->cfg &= ~(BIT(1) | BIT(2));
  265. if (config->mode & ETM_MODE_LOAD)
  266. /* 0b01 Trace load instructions as P0 instructions */
  267. config->cfg |= BIT(1);
  268. if (config->mode & ETM_MODE_STORE)
  269. /* 0b10 Trace store instructions as P0 instructions */
  270. config->cfg |= BIT(2);
  271. if (config->mode & ETM_MODE_LOAD_STORE)
  272. /*
  273. * 0b11 Trace load and store instructions
  274. * as P0 instructions
  275. */
  276. config->cfg |= BIT(1) | BIT(2);
  277. }
  278. /* bit[3], Branch broadcast mode */
  279. if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
  280. config->cfg |= BIT(3);
  281. else
  282. config->cfg &= ~BIT(3);
  283. /* bit[4], Cycle counting instruction trace bit */
  284. if ((config->mode & ETMv4_MODE_CYCACC) &&
  285. (drvdata->trccci == true))
  286. config->cfg |= BIT(4);
  287. else
  288. config->cfg &= ~BIT(4);
  289. /* bit[6], Context ID tracing bit */
  290. if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
  291. config->cfg |= BIT(6);
  292. else
  293. config->cfg &= ~BIT(6);
  294. if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
  295. config->cfg |= BIT(7);
  296. else
  297. config->cfg &= ~BIT(7);
  298. /* bits[10:8], Conditional instruction tracing bit */
  299. mode = ETM_MODE_COND(config->mode);
  300. if (drvdata->trccond == true) {
  301. config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
  302. config->cfg |= mode << 8;
  303. }
  304. /* bit[11], Global timestamp tracing bit */
  305. if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
  306. config->cfg |= BIT(11);
  307. else
  308. config->cfg &= ~BIT(11);
  309. /* bit[12], Return stack enable bit */
  310. if ((config->mode & ETM_MODE_RETURNSTACK) &&
  311. (drvdata->retstack == true))
  312. config->cfg |= BIT(12);
  313. else
  314. config->cfg &= ~BIT(12);
  315. /* bits[14:13], Q element enable field */
  316. mode = ETM_MODE_QELEM(config->mode);
  317. /* start by clearing QE bits */
  318. config->cfg &= ~(BIT(13) | BIT(14));
  319. /* if supported, Q elements with instruction counts are enabled */
  320. if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
  321. config->cfg |= BIT(13);
  322. /*
  323. * if supported, Q elements with and without instruction
  324. * counts are enabled
  325. */
  326. if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
  327. config->cfg |= BIT(14);
  328. /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
  329. if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
  330. (drvdata->atbtrig == true))
  331. config->eventctrl1 |= BIT(11);
  332. else
  333. config->eventctrl1 &= ~BIT(11);
  334. /* bit[12], Low-power state behavior override bit */
  335. if ((config->mode & ETM_MODE_LPOVERRIDE) &&
  336. (drvdata->lpoverride == true))
  337. config->eventctrl1 |= BIT(12);
  338. else
  339. config->eventctrl1 &= ~BIT(12);
  340. /* bit[8], Instruction stall bit */
  341. if (config->mode & ETM_MODE_ISTALL_EN)
  342. config->stall_ctrl |= BIT(8);
  343. else
  344. config->stall_ctrl &= ~BIT(8);
  345. /* bit[10], Prioritize instruction trace bit */
  346. if (config->mode & ETM_MODE_INSTPRIO)
  347. config->stall_ctrl |= BIT(10);
  348. else
  349. config->stall_ctrl &= ~BIT(10);
  350. /* bit[13], Trace overflow prevention bit */
  351. if ((config->mode & ETM_MODE_NOOVERFLOW) &&
  352. (drvdata->nooverflow == true))
  353. config->stall_ctrl |= BIT(13);
  354. else
  355. config->stall_ctrl &= ~BIT(13);
  356. /* bit[9] Start/stop logic control bit */
  357. if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
  358. config->vinst_ctrl |= BIT(9);
  359. else
  360. config->vinst_ctrl &= ~BIT(9);
  361. /* bit[10], Whether a trace unit must trace a Reset exception */
  362. if (config->mode & ETM_MODE_TRACE_RESET)
  363. config->vinst_ctrl |= BIT(10);
  364. else
  365. config->vinst_ctrl &= ~BIT(10);
  366. /* bit[11], Whether a trace unit must trace a system error exception */
  367. if ((config->mode & ETM_MODE_TRACE_ERR) &&
  368. (drvdata->trc_error == true))
  369. config->vinst_ctrl |= BIT(11);
  370. else
  371. config->vinst_ctrl &= ~BIT(11);
  372. if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
  373. etm4_config_trace_mode(config);
  374. spin_unlock(&drvdata->spinlock);
  375. return size;
  376. }
  377. static DEVICE_ATTR_RW(mode);
  378. static ssize_t pe_show(struct device *dev,
  379. struct device_attribute *attr,
  380. char *buf)
  381. {
  382. unsigned long val;
  383. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  384. struct etmv4_config *config = &drvdata->config;
  385. val = config->pe_sel;
  386. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  387. }
  388. static ssize_t pe_store(struct device *dev,
  389. struct device_attribute *attr,
  390. const char *buf, size_t size)
  391. {
  392. unsigned long val;
  393. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  394. struct etmv4_config *config = &drvdata->config;
  395. if (kstrtoul(buf, 16, &val))
  396. return -EINVAL;
  397. spin_lock(&drvdata->spinlock);
  398. if (val > drvdata->nr_pe) {
  399. spin_unlock(&drvdata->spinlock);
  400. return -EINVAL;
  401. }
  402. config->pe_sel = val;
  403. spin_unlock(&drvdata->spinlock);
  404. return size;
  405. }
  406. static DEVICE_ATTR_RW(pe);
  407. static ssize_t event_show(struct device *dev,
  408. struct device_attribute *attr,
  409. char *buf)
  410. {
  411. unsigned long val;
  412. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  413. struct etmv4_config *config = &drvdata->config;
  414. val = config->eventctrl0;
  415. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  416. }
  417. static ssize_t event_store(struct device *dev,
  418. struct device_attribute *attr,
  419. const char *buf, size_t size)
  420. {
  421. unsigned long val;
  422. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  423. struct etmv4_config *config = &drvdata->config;
  424. if (kstrtoul(buf, 16, &val))
  425. return -EINVAL;
  426. spin_lock(&drvdata->spinlock);
  427. switch (drvdata->nr_event) {
  428. case 0x0:
  429. /* EVENT0, bits[7:0] */
  430. config->eventctrl0 = val & 0xFF;
  431. break;
  432. case 0x1:
  433. /* EVENT1, bits[15:8] */
  434. config->eventctrl0 = val & 0xFFFF;
  435. break;
  436. case 0x2:
  437. /* EVENT2, bits[23:16] */
  438. config->eventctrl0 = val & 0xFFFFFF;
  439. break;
  440. case 0x3:
  441. /* EVENT3, bits[31:24] */
  442. config->eventctrl0 = val;
  443. break;
  444. default:
  445. break;
  446. }
  447. spin_unlock(&drvdata->spinlock);
  448. return size;
  449. }
  450. static DEVICE_ATTR_RW(event);
  451. static ssize_t event_instren_show(struct device *dev,
  452. struct device_attribute *attr,
  453. char *buf)
  454. {
  455. unsigned long val;
  456. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  457. struct etmv4_config *config = &drvdata->config;
  458. val = BMVAL(config->eventctrl1, 0, 3);
  459. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  460. }
  461. static ssize_t event_instren_store(struct device *dev,
  462. struct device_attribute *attr,
  463. const char *buf, size_t size)
  464. {
  465. unsigned long val;
  466. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  467. struct etmv4_config *config = &drvdata->config;
  468. if (kstrtoul(buf, 16, &val))
  469. return -EINVAL;
  470. spin_lock(&drvdata->spinlock);
  471. /* start by clearing all instruction event enable bits */
  472. config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
  473. switch (drvdata->nr_event) {
  474. case 0x0:
  475. /* generate Event element for event 1 */
  476. config->eventctrl1 |= val & BIT(1);
  477. break;
  478. case 0x1:
  479. /* generate Event element for event 1 and 2 */
  480. config->eventctrl1 |= val & (BIT(0) | BIT(1));
  481. break;
  482. case 0x2:
  483. /* generate Event element for event 1, 2 and 3 */
  484. config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
  485. break;
  486. case 0x3:
  487. /* generate Event element for all 4 events */
  488. config->eventctrl1 |= val & 0xF;
  489. break;
  490. default:
  491. break;
  492. }
  493. spin_unlock(&drvdata->spinlock);
  494. return size;
  495. }
  496. static DEVICE_ATTR_RW(event_instren);
  497. static ssize_t event_ts_show(struct device *dev,
  498. struct device_attribute *attr,
  499. char *buf)
  500. {
  501. unsigned long val;
  502. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  503. struct etmv4_config *config = &drvdata->config;
  504. val = config->ts_ctrl;
  505. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  506. }
  507. static ssize_t event_ts_store(struct device *dev,
  508. struct device_attribute *attr,
  509. const char *buf, size_t size)
  510. {
  511. unsigned long val;
  512. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  513. struct etmv4_config *config = &drvdata->config;
  514. if (kstrtoul(buf, 16, &val))
  515. return -EINVAL;
  516. if (!drvdata->ts_size)
  517. return -EINVAL;
  518. config->ts_ctrl = val & ETMv4_EVENT_MASK;
  519. return size;
  520. }
  521. static DEVICE_ATTR_RW(event_ts);
  522. static ssize_t syncfreq_show(struct device *dev,
  523. struct device_attribute *attr,
  524. char *buf)
  525. {
  526. unsigned long val;
  527. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  528. struct etmv4_config *config = &drvdata->config;
  529. val = config->syncfreq;
  530. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  531. }
  532. static ssize_t syncfreq_store(struct device *dev,
  533. struct device_attribute *attr,
  534. const char *buf, size_t size)
  535. {
  536. unsigned long val;
  537. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  538. struct etmv4_config *config = &drvdata->config;
  539. if (kstrtoul(buf, 16, &val))
  540. return -EINVAL;
  541. if (drvdata->syncpr == true)
  542. return -EINVAL;
  543. config->syncfreq = val & ETMv4_SYNC_MASK;
  544. return size;
  545. }
  546. static DEVICE_ATTR_RW(syncfreq);
  547. static ssize_t cyc_threshold_show(struct device *dev,
  548. struct device_attribute *attr,
  549. char *buf)
  550. {
  551. unsigned long val;
  552. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  553. struct etmv4_config *config = &drvdata->config;
  554. val = config->ccctlr;
  555. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  556. }
  557. static ssize_t cyc_threshold_store(struct device *dev,
  558. struct device_attribute *attr,
  559. const char *buf, size_t size)
  560. {
  561. unsigned long val;
  562. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  563. struct etmv4_config *config = &drvdata->config;
  564. if (kstrtoul(buf, 16, &val))
  565. return -EINVAL;
  566. if (val < drvdata->ccitmin)
  567. return -EINVAL;
  568. config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
  569. return size;
  570. }
  571. static DEVICE_ATTR_RW(cyc_threshold);
  572. static ssize_t bb_ctrl_show(struct device *dev,
  573. struct device_attribute *attr,
  574. char *buf)
  575. {
  576. unsigned long val;
  577. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  578. struct etmv4_config *config = &drvdata->config;
  579. val = config->bb_ctrl;
  580. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  581. }
  582. static ssize_t bb_ctrl_store(struct device *dev,
  583. struct device_attribute *attr,
  584. const char *buf, size_t size)
  585. {
  586. unsigned long val;
  587. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  588. struct etmv4_config *config = &drvdata->config;
  589. if (kstrtoul(buf, 16, &val))
  590. return -EINVAL;
  591. if (drvdata->trcbb == false)
  592. return -EINVAL;
  593. if (!drvdata->nr_addr_cmp)
  594. return -EINVAL;
  595. /*
  596. * Bit[7:0] selects which address range comparator is used for
  597. * branch broadcast control.
  598. */
  599. if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
  600. return -EINVAL;
  601. config->bb_ctrl = val;
  602. return size;
  603. }
  604. static DEVICE_ATTR_RW(bb_ctrl);
  605. static ssize_t event_vinst_show(struct device *dev,
  606. struct device_attribute *attr,
  607. char *buf)
  608. {
  609. unsigned long val;
  610. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  611. struct etmv4_config *config = &drvdata->config;
  612. val = config->vinst_ctrl & ETMv4_EVENT_MASK;
  613. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  614. }
  615. static ssize_t event_vinst_store(struct device *dev,
  616. struct device_attribute *attr,
  617. const char *buf, size_t size)
  618. {
  619. unsigned long val;
  620. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  621. struct etmv4_config *config = &drvdata->config;
  622. if (kstrtoul(buf, 16, &val))
  623. return -EINVAL;
  624. spin_lock(&drvdata->spinlock);
  625. val &= ETMv4_EVENT_MASK;
  626. config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
  627. config->vinst_ctrl |= val;
  628. spin_unlock(&drvdata->spinlock);
  629. return size;
  630. }
  631. static DEVICE_ATTR_RW(event_vinst);
  632. static ssize_t s_exlevel_vinst_show(struct device *dev,
  633. struct device_attribute *attr,
  634. char *buf)
  635. {
  636. unsigned long val;
  637. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  638. struct etmv4_config *config = &drvdata->config;
  639. val = BMVAL(config->vinst_ctrl, 16, 19);
  640. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  641. }
  642. static ssize_t s_exlevel_vinst_store(struct device *dev,
  643. struct device_attribute *attr,
  644. const char *buf, size_t size)
  645. {
  646. unsigned long val;
  647. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  648. struct etmv4_config *config = &drvdata->config;
  649. if (kstrtoul(buf, 16, &val))
  650. return -EINVAL;
  651. spin_lock(&drvdata->spinlock);
  652. /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
  653. config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
  654. /* enable instruction tracing for corresponding exception level */
  655. val &= drvdata->s_ex_level;
  656. config->vinst_ctrl |= (val << 16);
  657. spin_unlock(&drvdata->spinlock);
  658. return size;
  659. }
  660. static DEVICE_ATTR_RW(s_exlevel_vinst);
  661. static ssize_t ns_exlevel_vinst_show(struct device *dev,
  662. struct device_attribute *attr,
  663. char *buf)
  664. {
  665. unsigned long val;
  666. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  667. struct etmv4_config *config = &drvdata->config;
  668. /* EXLEVEL_NS, bits[23:20] */
  669. val = BMVAL(config->vinst_ctrl, 20, 23);
  670. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  671. }
/* Select which non-secure exception levels instruction tracing covers. */
static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* input is a hex bitmask of non-secure exception levels */
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;	/* limit to levels this ETM implements */
	config->vinst_ctrl |= (val << 20);	/* EXLEVEL_NS occupies bits[23:20] */
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
  691. static ssize_t addr_idx_show(struct device *dev,
  692. struct device_attribute *attr,
  693. char *buf)
  694. {
  695. unsigned long val;
  696. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  697. struct etmv4_config *config = &drvdata->config;
  698. val = config->addr_idx;
  699. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  700. }
/* Select which address comparator subsequent addr_* writes operate on. */
static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* comparators come in pairs, hence nr_addr_cmp * 2 single slots */
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);
  722. static ssize_t addr_instdatatype_show(struct device *dev,
  723. struct device_attribute *attr,
  724. char *buf)
  725. {
  726. ssize_t len;
  727. u8 val, idx;
  728. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  729. struct etmv4_config *config = &drvdata->config;
  730. spin_lock(&drvdata->spinlock);
  731. idx = config->addr_idx;
  732. val = BMVAL(config->addr_acc[idx], 0, 1);
  733. len = scnprintf(buf, PAGE_SIZE, "%s\n",
  734. val == ETM_INSTR_ADDR ? "instr" :
  735. (val == ETM_DATA_LOAD_ADDR ? "data_load" :
  736. (val == ETM_DATA_STORE_ADDR ? "data_store" :
  737. "data_load_store")));
  738. spin_unlock(&drvdata->spinlock);
  739. return len;
  740. }
/*
 * Set the access type of the selected address comparator.
 * NOTE(review): only "instr" is acted on here; any other string is
 * silently accepted without changing the configuration.
 */
static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that cannot fit the local buffer */
	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
  762. static ssize_t addr_single_show(struct device *dev,
  763. struct device_attribute *attr,
  764. char *buf)
  765. {
  766. u8 idx;
  767. unsigned long val;
  768. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  769. struct etmv4_config *config = &drvdata->config;
  770. idx = config->addr_idx;
  771. spin_lock(&drvdata->spinlock);
  772. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  773. config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  774. spin_unlock(&drvdata->spinlock);
  775. return -EPERM;
  776. }
  777. val = (unsigned long)config->addr_val[idx];
  778. spin_unlock(&drvdata->spinlock);
  779. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  780. }
/* Program the selected comparator as a single address comparator. */
static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* only allowed if the slot is free or already a single comparator */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);
/*
 * Report the low/high bounds of the selected comparator pair when used
 * as an address range.  The selected index must be the even member of
 * a pair and both members must be free or already range-typed.
 */
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* ranges are programmed on even/odd pairs; idx must be even */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}
/*
 * Program the selected comparator pair as an address range.  Expects
 * "low high" in hex; the pair must be free or already range-typed.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* ranges occupy an even/odd pair; idx must be the even member */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
  872. static ssize_t addr_start_show(struct device *dev,
  873. struct device_attribute *attr,
  874. char *buf)
  875. {
  876. u8 idx;
  877. unsigned long val;
  878. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  879. struct etmv4_config *config = &drvdata->config;
  880. spin_lock(&drvdata->spinlock);
  881. idx = config->addr_idx;
  882. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  883. config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  884. spin_unlock(&drvdata->spinlock);
  885. return -EPERM;
  886. }
  887. val = (unsigned long)config->addr_val[idx];
  888. spin_unlock(&drvdata->spinlock);
  889. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  890. }
/* Program the selected comparator as a trace-start address and enable
 * the ViewInst start/stop logic.
 */
static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* no address comparators implemented at all */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	/* slot must be free or already a start comparator */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* mark comparator 'idx' as a start resource in the vi ssctlr */
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
  921. static ssize_t addr_stop_show(struct device *dev,
  922. struct device_attribute *attr,
  923. char *buf)
  924. {
  925. u8 idx;
  926. unsigned long val;
  927. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  928. struct etmv4_config *config = &drvdata->config;
  929. spin_lock(&drvdata->spinlock);
  930. idx = config->addr_idx;
  931. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  932. config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  933. spin_unlock(&drvdata->spinlock);
  934. return -EPERM;
  935. }
  936. val = (unsigned long)config->addr_val[idx];
  937. spin_unlock(&drvdata->spinlock);
  938. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  939. }
/* Program the selected comparator as a trace-stop address and enable
 * the ViewInst start/stop logic.
 */
static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* no address comparators implemented at all */
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	/* slot must be free or already a stop comparator */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	/* stop selectors occupy the upper half of the vi ssctlr */
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
  970. static ssize_t addr_ctxtype_show(struct device *dev,
  971. struct device_attribute *attr,
  972. char *buf)
  973. {
  974. ssize_t len;
  975. u8 idx, val;
  976. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  977. struct etmv4_config *config = &drvdata->config;
  978. spin_lock(&drvdata->spinlock);
  979. idx = config->addr_idx;
  980. /* CONTEXTTYPE, bits[3:2] */
  981. val = BMVAL(config->addr_acc[idx], 2, 3);
  982. len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
  983. (val == ETM_CTX_CTXID ? "ctxid" :
  984. (val == ETM_CTX_VMID ? "vmid" : "all")));
  985. spin_unlock(&drvdata->spinlock);
  986. return len;
  987. }
/*
 * Set the context-comparison type of the selected address comparator.
 * Accepts "none", "ctxid", "vmid" or "all"; ctxid/vmid requests are
 * honoured only if the corresponding comparators exist on this ETM.
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* reject input that cannot fit the local buffer */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
  1031. static ssize_t addr_context_show(struct device *dev,
  1032. struct device_attribute *attr,
  1033. char *buf)
  1034. {
  1035. u8 idx;
  1036. unsigned long val;
  1037. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1038. struct etmv4_config *config = &drvdata->config;
  1039. spin_lock(&drvdata->spinlock);
  1040. idx = config->addr_idx;
  1041. /* context ID comparator bits[6:4] */
  1042. val = BMVAL(config->addr_acc[idx], 4, 6);
  1043. spin_unlock(&drvdata->spinlock);
  1044. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1045. }
/* Link the selected address comparator to a context ID comparator. */
static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* need at least two ctxid or vmid comparators for this to apply */
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	/* value must index an implemented comparator (larger of the two sets) */
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);
  1070. static ssize_t seq_idx_show(struct device *dev,
  1071. struct device_attribute *attr,
  1072. char *buf)
  1073. {
  1074. unsigned long val;
  1075. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1076. struct etmv4_config *config = &drvdata->config;
  1077. val = config->seq_idx;
  1078. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1079. }
/* Select which sequencer state subsequent seq_event writes target. */
static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* there are nrseqstate - 1 programmable transitions */
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);
  1101. static ssize_t seq_state_show(struct device *dev,
  1102. struct device_attribute *attr,
  1103. char *buf)
  1104. {
  1105. unsigned long val;
  1106. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1107. struct etmv4_config *config = &drvdata->config;
  1108. val = config->seq_state;
  1109. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1110. }
/*
 * Set the sequencer state.
 * NOTE(review): unlike the sibling stores, seq_state is written without
 * taking drvdata->spinlock — confirm this single-word write is safe.
 */
static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;
	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);
  1126. static ssize_t seq_event_show(struct device *dev,
  1127. struct device_attribute *attr,
  1128. char *buf)
  1129. {
  1130. u8 idx;
  1131. unsigned long val;
  1132. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1133. struct etmv4_config *config = &drvdata->config;
  1134. spin_lock(&drvdata->spinlock);
  1135. idx = config->seq_idx;
  1136. val = config->seq_ctrl[idx];
  1137. spin_unlock(&drvdata->spinlock);
  1138. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1139. }
/* Program the event for the currently selected sequencer transition. */
static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* RST, bits[7:0] */
	config->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);
  1158. static ssize_t seq_reset_event_show(struct device *dev,
  1159. struct device_attribute *attr,
  1160. char *buf)
  1161. {
  1162. unsigned long val;
  1163. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1164. struct etmv4_config *config = &drvdata->config;
  1165. val = config->seq_rst;
  1166. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1167. }
/* Program the event that resets the sequencer to state 0. */
static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* meaningless without a sequencer */
	if (!(drvdata->nrseqstate))
		return -EINVAL;
	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);
  1183. static ssize_t cntr_idx_show(struct device *dev,
  1184. struct device_attribute *attr,
  1185. char *buf)
  1186. {
  1187. unsigned long val;
  1188. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1189. struct etmv4_config *config = &drvdata->config;
  1190. val = config->cntr_idx;
  1191. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1192. }
/* Select which counter subsequent cntr* writes operate on. */
static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);
  1214. static ssize_t cntrldvr_show(struct device *dev,
  1215. struct device_attribute *attr,
  1216. char *buf)
  1217. {
  1218. u8 idx;
  1219. unsigned long val;
  1220. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1221. struct etmv4_config *config = &drvdata->config;
  1222. spin_lock(&drvdata->spinlock);
  1223. idx = config->cntr_idx;
  1224. val = config->cntrldvr[idx];
  1225. spin_unlock(&drvdata->spinlock);
  1226. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1227. }
/* Program the reload value of the currently selected counter. */
static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* counters are 16 bits wide */
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);
  1247. static ssize_t cntr_val_show(struct device *dev,
  1248. struct device_attribute *attr,
  1249. char *buf)
  1250. {
  1251. u8 idx;
  1252. unsigned long val;
  1253. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1254. struct etmv4_config *config = &drvdata->config;
  1255. spin_lock(&drvdata->spinlock);
  1256. idx = config->cntr_idx;
  1257. val = config->cntr_val[idx];
  1258. spin_unlock(&drvdata->spinlock);
  1259. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1260. }
/* Set the value of the currently selected counter. */
static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* counters are 16 bits wide */
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);
  1280. static ssize_t cntr_ctrl_show(struct device *dev,
  1281. struct device_attribute *attr,
  1282. char *buf)
  1283. {
  1284. u8 idx;
  1285. unsigned long val;
  1286. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1287. struct etmv4_config *config = &drvdata->config;
  1288. spin_lock(&drvdata->spinlock);
  1289. idx = config->cntr_idx;
  1290. val = config->cntr_ctrl[idx];
  1291. spin_unlock(&drvdata->spinlock);
  1292. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1293. }
/* Program the control value of the currently selected counter. */
static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
  1311. static ssize_t res_idx_show(struct device *dev,
  1312. struct device_attribute *attr,
  1313. char *buf)
  1314. {
  1315. unsigned long val;
  1316. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1317. struct etmv4_config *config = &drvdata->config;
  1318. val = config->res_idx;
  1319. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1320. }
/* Select which resource selector res_ctrl writes operate on. */
static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
  1343. static ssize_t res_ctrl_show(struct device *dev,
  1344. struct device_attribute *attr,
  1345. char *buf)
  1346. {
  1347. u8 idx;
  1348. unsigned long val;
  1349. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1350. struct etmv4_config *config = &drvdata->config;
  1351. spin_lock(&drvdata->spinlock);
  1352. idx = config->res_idx;
  1353. val = config->res_ctrl[idx];
  1354. spin_unlock(&drvdata->spinlock);
  1355. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1356. }
/* Program the control value of the currently selected resource selector. */
static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
  1378. static ssize_t ctxid_idx_show(struct device *dev,
  1379. struct device_attribute *attr,
  1380. char *buf)
  1381. {
  1382. unsigned long val;
  1383. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1384. struct etmv4_config *config = &drvdata->config;
  1385. val = config->ctxid_idx;
  1386. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1387. }
/* Select which context ID comparator ctxid_pid writes operate on. */
static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;
	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
  1409. static ssize_t ctxid_pid_show(struct device *dev,
  1410. struct device_attribute *attr,
  1411. char *buf)
  1412. {
  1413. u8 idx;
  1414. unsigned long val;
  1415. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1416. struct etmv4_config *config = &drvdata->config;
  1417. spin_lock(&drvdata->spinlock);
  1418. idx = config->ctxid_idx;
  1419. val = (unsigned long)config->ctxid_vpid[idx];
  1420. spin_unlock(&drvdata->spinlock);
  1421. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1422. }
/*
 * Program the selected context ID comparator with a PID.  The value
 * written from user space is a namespace-local (virtual) PID; it is
 * translated to the kernel-visible PID before being stored.
 */
static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;
	pid = coresight_vpid_to_pid(vpid);
	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;	/* value the hardware compares */
	config->ctxid_vpid[idx] = (u64)vpid;	/* value shown back to user space */
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
  1449. static ssize_t ctxid_masks_show(struct device *dev,
  1450. struct device_attribute *attr,
  1451. char *buf)
  1452. {
  1453. unsigned long val1, val2;
  1454. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1455. struct etmv4_config *config = &drvdata->config;
  1456. spin_lock(&drvdata->spinlock);
  1457. val1 = config->ctxid_mask0;
  1458. val2 = config->ctxid_mask1;
  1459. spin_unlock(&drvdata->spinlock);
  1460. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1461. }
  1462. static ssize_t ctxid_masks_store(struct device *dev,
  1463. struct device_attribute *attr,
  1464. const char *buf, size_t size)
  1465. {
  1466. u8 i, j, maskbyte;
  1467. unsigned long val1, val2, mask;
  1468. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1469. struct etmv4_config *config = &drvdata->config;
  1470. /*
  1471. * only implemented when ctxid tracing is enabled, i.e. at least one
  1472. * ctxid comparator is implemented and ctxid is greater than 0 bits
  1473. * in length
  1474. */
  1475. if (!drvdata->ctxid_size || !drvdata->numcidc)
  1476. return -EINVAL;
  1477. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1478. return -EINVAL;
  1479. spin_lock(&drvdata->spinlock);
  1480. /*
  1481. * each byte[0..3] controls mask value applied to ctxid
  1482. * comparator[0..3]
  1483. */
  1484. switch (drvdata->numcidc) {
  1485. case 0x1:
  1486. /* COMP0, bits[7:0] */
  1487. config->ctxid_mask0 = val1 & 0xFF;
  1488. break;
  1489. case 0x2:
  1490. /* COMP1, bits[15:8] */
  1491. config->ctxid_mask0 = val1 & 0xFFFF;
  1492. break;
  1493. case 0x3:
  1494. /* COMP2, bits[23:16] */
  1495. config->ctxid_mask0 = val1 & 0xFFFFFF;
  1496. break;
  1497. case 0x4:
  1498. /* COMP3, bits[31:24] */
  1499. config->ctxid_mask0 = val1;
  1500. break;
  1501. case 0x5:
  1502. /* COMP4, bits[7:0] */
  1503. config->ctxid_mask0 = val1;
  1504. config->ctxid_mask1 = val2 & 0xFF;
  1505. break;
  1506. case 0x6:
  1507. /* COMP5, bits[15:8] */
  1508. config->ctxid_mask0 = val1;
  1509. config->ctxid_mask1 = val2 & 0xFFFF;
  1510. break;
  1511. case 0x7:
  1512. /* COMP6, bits[23:16] */
  1513. config->ctxid_mask0 = val1;
  1514. config->ctxid_mask1 = val2 & 0xFFFFFF;
  1515. break;
  1516. case 0x8:
  1517. /* COMP7, bits[31:24] */
  1518. config->ctxid_mask0 = val1;
  1519. config->ctxid_mask1 = val2;
  1520. break;
  1521. default:
  1522. break;
  1523. }
  1524. /*
  1525. * If software sets a mask bit to 1, it must program relevant byte
  1526. * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
  1527. * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
  1528. * of ctxid comparator0 value (corresponding to byte 0) register.
  1529. */
  1530. mask = config->ctxid_mask0;
  1531. for (i = 0; i < drvdata->numcidc; i++) {
  1532. /* mask value of corresponding ctxid comparator */
  1533. maskbyte = mask & ETMv4_EVENT_MASK;
  1534. /*
  1535. * each bit corresponds to a byte of respective ctxid comparator
  1536. * value register
  1537. */
  1538. for (j = 0; j < 8; j++) {
  1539. if (maskbyte & 1)
  1540. config->ctxid_pid[i] &= ~(0xFF << (j * 8));
  1541. maskbyte >>= 1;
  1542. }
  1543. /* Select the next ctxid comparator mask value */
  1544. if (i == 3)
  1545. /* ctxid comparators[4-7] */
  1546. mask = config->ctxid_mask1;
  1547. else
  1548. mask >>= 0x8;
  1549. }
  1550. spin_unlock(&drvdata->spinlock);
  1551. return size;
  1552. }
  1553. static DEVICE_ATTR_RW(ctxid_masks);
  1554. static ssize_t vmid_idx_show(struct device *dev,
  1555. struct device_attribute *attr,
  1556. char *buf)
  1557. {
  1558. unsigned long val;
  1559. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1560. struct etmv4_config *config = &drvdata->config;
  1561. val = config->vmid_idx;
  1562. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1563. }
  1564. static ssize_t vmid_idx_store(struct device *dev,
  1565. struct device_attribute *attr,
  1566. const char *buf, size_t size)
  1567. {
  1568. unsigned long val;
  1569. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1570. struct etmv4_config *config = &drvdata->config;
  1571. if (kstrtoul(buf, 16, &val))
  1572. return -EINVAL;
  1573. if (val >= drvdata->numvmidc)
  1574. return -EINVAL;
  1575. /*
  1576. * Use spinlock to ensure index doesn't change while it gets
  1577. * dereferenced multiple times within a spinlock block elsewhere.
  1578. */
  1579. spin_lock(&drvdata->spinlock);
  1580. config->vmid_idx = val;
  1581. spin_unlock(&drvdata->spinlock);
  1582. return size;
  1583. }
  1584. static DEVICE_ATTR_RW(vmid_idx);
  1585. static ssize_t vmid_val_show(struct device *dev,
  1586. struct device_attribute *attr,
  1587. char *buf)
  1588. {
  1589. unsigned long val;
  1590. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1591. struct etmv4_config *config = &drvdata->config;
  1592. val = (unsigned long)config->vmid_val[config->vmid_idx];
  1593. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1594. }
  1595. static ssize_t vmid_val_store(struct device *dev,
  1596. struct device_attribute *attr,
  1597. const char *buf, size_t size)
  1598. {
  1599. unsigned long val;
  1600. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1601. struct etmv4_config *config = &drvdata->config;
  1602. /*
  1603. * only implemented when vmid tracing is enabled, i.e. at least one
  1604. * vmid comparator is implemented and at least 8 bit vmid size
  1605. */
  1606. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1607. return -EINVAL;
  1608. if (kstrtoul(buf, 16, &val))
  1609. return -EINVAL;
  1610. spin_lock(&drvdata->spinlock);
  1611. config->vmid_val[config->vmid_idx] = (u64)val;
  1612. spin_unlock(&drvdata->spinlock);
  1613. return size;
  1614. }
  1615. static DEVICE_ATTR_RW(vmid_val);
  1616. static ssize_t vmid_masks_show(struct device *dev,
  1617. struct device_attribute *attr, char *buf)
  1618. {
  1619. unsigned long val1, val2;
  1620. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1621. struct etmv4_config *config = &drvdata->config;
  1622. spin_lock(&drvdata->spinlock);
  1623. val1 = config->vmid_mask0;
  1624. val2 = config->vmid_mask1;
  1625. spin_unlock(&drvdata->spinlock);
  1626. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1627. }
  1628. static ssize_t vmid_masks_store(struct device *dev,
  1629. struct device_attribute *attr,
  1630. const char *buf, size_t size)
  1631. {
  1632. u8 i, j, maskbyte;
  1633. unsigned long val1, val2, mask;
  1634. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1635. struct etmv4_config *config = &drvdata->config;
  1636. /*
  1637. * only implemented when vmid tracing is enabled, i.e. at least one
  1638. * vmid comparator is implemented and at least 8 bit vmid size
  1639. */
  1640. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1641. return -EINVAL;
  1642. if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
  1643. return -EINVAL;
  1644. spin_lock(&drvdata->spinlock);
  1645. /*
  1646. * each byte[0..3] controls mask value applied to vmid
  1647. * comparator[0..3]
  1648. */
  1649. switch (drvdata->numvmidc) {
  1650. case 0x1:
  1651. /* COMP0, bits[7:0] */
  1652. config->vmid_mask0 = val1 & 0xFF;
  1653. break;
  1654. case 0x2:
  1655. /* COMP1, bits[15:8] */
  1656. config->vmid_mask0 = val1 & 0xFFFF;
  1657. break;
  1658. case 0x3:
  1659. /* COMP2, bits[23:16] */
  1660. config->vmid_mask0 = val1 & 0xFFFFFF;
  1661. break;
  1662. case 0x4:
  1663. /* COMP3, bits[31:24] */
  1664. config->vmid_mask0 = val1;
  1665. break;
  1666. case 0x5:
  1667. /* COMP4, bits[7:0] */
  1668. config->vmid_mask0 = val1;
  1669. config->vmid_mask1 = val2 & 0xFF;
  1670. break;
  1671. case 0x6:
  1672. /* COMP5, bits[15:8] */
  1673. config->vmid_mask0 = val1;
  1674. config->vmid_mask1 = val2 & 0xFFFF;
  1675. break;
  1676. case 0x7:
  1677. /* COMP6, bits[23:16] */
  1678. config->vmid_mask0 = val1;
  1679. config->vmid_mask1 = val2 & 0xFFFFFF;
  1680. break;
  1681. case 0x8:
  1682. /* COMP7, bits[31:24] */
  1683. config->vmid_mask0 = val1;
  1684. config->vmid_mask1 = val2;
  1685. break;
  1686. default:
  1687. break;
  1688. }
  1689. /*
  1690. * If software sets a mask bit to 1, it must program relevant byte
  1691. * of vmid comparator value 0x0, otherwise behavior is unpredictable.
  1692. * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
  1693. * of vmid comparator0 value (corresponding to byte 0) register.
  1694. */
  1695. mask = config->vmid_mask0;
  1696. for (i = 0; i < drvdata->numvmidc; i++) {
  1697. /* mask value of corresponding vmid comparator */
  1698. maskbyte = mask & ETMv4_EVENT_MASK;
  1699. /*
  1700. * each bit corresponds to a byte of respective vmid comparator
  1701. * value register
  1702. */
  1703. for (j = 0; j < 8; j++) {
  1704. if (maskbyte & 1)
  1705. config->vmid_val[i] &= ~(0xFF << (j * 8));
  1706. maskbyte >>= 1;
  1707. }
  1708. /* Select the next vmid comparator mask value */
  1709. if (i == 3)
  1710. /* vmid comparators[4-7] */
  1711. mask = config->vmid_mask1;
  1712. else
  1713. mask >>= 0x8;
  1714. }
  1715. spin_unlock(&drvdata->spinlock);
  1716. return size;
  1717. }
  1718. static DEVICE_ATTR_RW(vmid_masks);
  1719. static ssize_t cpu_show(struct device *dev,
  1720. struct device_attribute *attr, char *buf)
  1721. {
  1722. int val;
  1723. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1724. val = drvdata->cpu;
  1725. return scnprintf(buf, PAGE_SIZE, "%d\n", val);
  1726. }
  1727. static DEVICE_ATTR_RO(cpu);
/*
 * Configuration attributes exposed in the default (unnamed) sysfs group
 * of the ETMv4 device; see coresight_etmv4_group below.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
/* Parameter block for reading a single ETMv4 register on a remote CPU. */
struct etmv4_reg {
	void __iomem *addr;	/* register address to read */
	u32 data;		/* value read back by do_smp_cross_read() */
};
/* SMP cross-call callback: read the register described by @data. */
static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}
  1786. static u32 etmv4_cross_read(const struct device *dev, u32 offset)
  1787. {
  1788. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
  1789. struct etmv4_reg reg;
  1790. reg.addr = drvdata->base + offset;
  1791. /*
  1792. * smp cross call ensures the CPU will be powered up before
  1793. * accessing the ETMv4 trace core registers
  1794. */
  1795. smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
  1796. return reg.data;
  1797. }
/*
 * Generators for sysfs attributes dumping a single management register.
 * The "cross_read" variant routes the access through etmv4_cross_read()
 * so it happens on the owning CPU; the "simple" variant passes NULL as
 * the accessor (presumably a direct read inside coresight_simple_func —
 * NOTE(review): confirm against the coresight_simple_func definition).
 */
#define coresight_etm4x_simple_func(name, offset) \
	coresight_simple_func(struct etmv4_drvdata, NULL, name, offset)

#define coresight_etm4x_cross_read(name, offset) \
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read, \
			      name, offset)

coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
coresight_etm4x_simple_func(trclsr, TRCLSR);
coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_simple_func(trcdevid, TRCDEVID);
coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
/* Attributes for the "mgmt" sysfs subdirectory (raw register dumps). */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
/* TRCIDRn ID registers, read on the owning CPU via SMP cross call. */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
/* Attributes for the "trcidr" sysfs subdirectory (ID register dumps). */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
/* Default (unnamed) group: configuration attributes. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" subdirectory: management register dumps. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" subdirectory: TRCIDRn ID register dumps. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* All sysfs groups registered for the ETMv4 device (NULL-terminated). */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};