coresight-etm4x-sysfs.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright(C) 2015 Linaro Limited. All rights reserved.
  4. * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
  5. */
  6. #include <linux/pid_namespace.h>
  7. #include <linux/pm_runtime.h>
  8. #include <linux/sysfs.h>
  9. #include "coresight-etm4x.h"
  10. #include "coresight-priv.h"
  11. static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
  12. {
  13. u8 idx;
  14. struct etmv4_config *config = &drvdata->config;
  15. idx = config->addr_idx;
  16. /*
  17. * TRCACATRn.TYPE bit[1:0]: type of comparison
  18. * the trace unit performs
  19. */
  20. if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
  21. if (idx % 2 != 0)
  22. return -EINVAL;
  23. /*
  24. * We are performing instruction address comparison. Set the
  25. * relevant bit of ViewInst Include/Exclude Control register
  26. * for corresponding address comparator pair.
  27. */
  28. if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
  29. config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
  30. return -EINVAL;
  31. if (exclude == true) {
  32. /*
  33. * Set exclude bit and unset the include bit
  34. * corresponding to comparator pair
  35. */
  36. config->viiectlr |= BIT(idx / 2 + 16);
  37. config->viiectlr &= ~BIT(idx / 2);
  38. } else {
  39. /*
  40. * Set include bit and unset exclude bit
  41. * corresponding to comparator pair
  42. */
  43. config->viiectlr |= BIT(idx / 2);
  44. config->viiectlr &= ~BIT(idx / 2 + 16);
  45. }
  46. }
  47. return 0;
  48. }
  49. static ssize_t nr_pe_cmp_show(struct device *dev,
  50. struct device_attribute *attr,
  51. char *buf)
  52. {
  53. unsigned long val;
  54. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  55. val = drvdata->nr_pe_cmp;
  56. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  57. }
  58. static DEVICE_ATTR_RO(nr_pe_cmp);
  59. static ssize_t nr_addr_cmp_show(struct device *dev,
  60. struct device_attribute *attr,
  61. char *buf)
  62. {
  63. unsigned long val;
  64. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  65. val = drvdata->nr_addr_cmp;
  66. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  67. }
  68. static DEVICE_ATTR_RO(nr_addr_cmp);
  69. static ssize_t nr_cntr_show(struct device *dev,
  70. struct device_attribute *attr,
  71. char *buf)
  72. {
  73. unsigned long val;
  74. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  75. val = drvdata->nr_cntr;
  76. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  77. }
  78. static DEVICE_ATTR_RO(nr_cntr);
  79. static ssize_t nr_ext_inp_show(struct device *dev,
  80. struct device_attribute *attr,
  81. char *buf)
  82. {
  83. unsigned long val;
  84. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  85. val = drvdata->nr_ext_inp;
  86. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  87. }
  88. static DEVICE_ATTR_RO(nr_ext_inp);
  89. static ssize_t numcidc_show(struct device *dev,
  90. struct device_attribute *attr,
  91. char *buf)
  92. {
  93. unsigned long val;
  94. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  95. val = drvdata->numcidc;
  96. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  97. }
  98. static DEVICE_ATTR_RO(numcidc);
  99. static ssize_t numvmidc_show(struct device *dev,
  100. struct device_attribute *attr,
  101. char *buf)
  102. {
  103. unsigned long val;
  104. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  105. val = drvdata->numvmidc;
  106. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  107. }
  108. static DEVICE_ATTR_RO(numvmidc);
  109. static ssize_t nrseqstate_show(struct device *dev,
  110. struct device_attribute *attr,
  111. char *buf)
  112. {
  113. unsigned long val;
  114. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  115. val = drvdata->nrseqstate;
  116. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  117. }
  118. static DEVICE_ATTR_RO(nrseqstate);
  119. static ssize_t nr_resource_show(struct device *dev,
  120. struct device_attribute *attr,
  121. char *buf)
  122. {
  123. unsigned long val;
  124. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  125. val = drvdata->nr_resource;
  126. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  127. }
  128. static DEVICE_ATTR_RO(nr_resource);
  129. static ssize_t nr_ss_cmp_show(struct device *dev,
  130. struct device_attribute *attr,
  131. char *buf)
  132. {
  133. unsigned long val;
  134. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  135. val = drvdata->nr_ss_cmp;
  136. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  137. }
  138. static DEVICE_ATTR_RO(nr_ss_cmp);
  139. static ssize_t reset_store(struct device *dev,
  140. struct device_attribute *attr,
  141. const char *buf, size_t size)
  142. {
  143. int i;
  144. unsigned long val;
  145. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  146. struct etmv4_config *config = &drvdata->config;
  147. if (kstrtoul(buf, 16, &val))
  148. return -EINVAL;
  149. spin_lock(&drvdata->spinlock);
  150. if (val)
  151. config->mode = 0x0;
  152. /* Disable data tracing: do not trace load and store data transfers */
  153. config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
  154. config->cfg &= ~(BIT(1) | BIT(2));
  155. /* Disable data value and data address tracing */
  156. config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
  157. ETM_MODE_DATA_TRACE_VAL);
  158. config->cfg &= ~(BIT(16) | BIT(17));
  159. /* Disable all events tracing */
  160. config->eventctrl0 = 0x0;
  161. config->eventctrl1 = 0x0;
  162. /* Disable timestamp event */
  163. config->ts_ctrl = 0x0;
  164. /* Disable stalling */
  165. config->stall_ctrl = 0x0;
  166. /* Reset trace synchronization period to 2^8 = 256 bytes*/
  167. if (drvdata->syncpr == false)
  168. config->syncfreq = 0x8;
  169. /*
  170. * Enable ViewInst to trace everything with start-stop logic in
  171. * started state. ARM recommends start-stop logic is set before
  172. * each trace run.
  173. */
  174. config->vinst_ctrl |= BIT(0);
  175. if (drvdata->nr_addr_cmp == true) {
  176. config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
  177. /* SSSTATUS, bit[9] */
  178. config->vinst_ctrl |= BIT(9);
  179. }
  180. /* No address range filtering for ViewInst */
  181. config->viiectlr = 0x0;
  182. /* No start-stop filtering for ViewInst */
  183. config->vissctlr = 0x0;
  184. /* Disable seq events */
  185. for (i = 0; i < drvdata->nrseqstate-1; i++)
  186. config->seq_ctrl[i] = 0x0;
  187. config->seq_rst = 0x0;
  188. config->seq_state = 0x0;
  189. /* Disable external input events */
  190. config->ext_inp = 0x0;
  191. config->cntr_idx = 0x0;
  192. for (i = 0; i < drvdata->nr_cntr; i++) {
  193. config->cntrldvr[i] = 0x0;
  194. config->cntr_ctrl[i] = 0x0;
  195. config->cntr_val[i] = 0x0;
  196. }
  197. config->res_idx = 0x0;
  198. for (i = 0; i < drvdata->nr_resource; i++)
  199. config->res_ctrl[i] = 0x0;
  200. for (i = 0; i < drvdata->nr_ss_cmp; i++) {
  201. config->ss_ctrl[i] = 0x0;
  202. config->ss_pe_cmp[i] = 0x0;
  203. }
  204. config->addr_idx = 0x0;
  205. for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
  206. config->addr_val[i] = 0x0;
  207. config->addr_acc[i] = 0x0;
  208. config->addr_type[i] = ETM_ADDR_TYPE_NONE;
  209. }
  210. config->ctxid_idx = 0x0;
  211. for (i = 0; i < drvdata->numcidc; i++)
  212. config->ctxid_pid[i] = 0x0;
  213. config->ctxid_mask0 = 0x0;
  214. config->ctxid_mask1 = 0x0;
  215. config->vmid_idx = 0x0;
  216. for (i = 0; i < drvdata->numvmidc; i++)
  217. config->vmid_val[i] = 0x0;
  218. config->vmid_mask0 = 0x0;
  219. config->vmid_mask1 = 0x0;
  220. drvdata->trcid = drvdata->cpu + 1;
  221. spin_unlock(&drvdata->spinlock);
  222. return size;
  223. }
  224. static DEVICE_ATTR_WO(reset);
  225. static ssize_t mode_show(struct device *dev,
  226. struct device_attribute *attr,
  227. char *buf)
  228. {
  229. unsigned long val;
  230. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  231. struct etmv4_config *config = &drvdata->config;
  232. val = config->mode;
  233. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  234. }
/*
 * mode - translate a user-supplied mode word into the per-register
 * configuration held in drvdata->config.
 *
 * The value is masked with ETMv4_MODE_ALL and then expanded, feature by
 * feature, into cfg (TRCCONFIGR image), eventctrl1, stall_ctrl and
 * vinst_ctrl; features the hardware does not implement (instrp0, trcbb,
 * trccci, ...) are forced off regardless of the request. The whole
 * translation happens under the spinlock so a concurrent enable sees a
 * consistent configuration.
 */
static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	/*
	 * Program the include/exclude bits for the currently selected
	 * address comparator pair.
	 * NOTE(review): the -EINVAL return of etm4_set_mode_exclude() is
	 * silently discarded here - presumably intentional (best effort),
	 * but worth confirming.
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	/* P0 load/store tracing only if the implementation supports it */
	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
	    (drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	/* bit[7], Virtual context identifier (VMID) tracing bit */
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
	    (drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11] of eventctrl1, AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12] of eventctrl1, Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8] of stall_ctrl, Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10] of stall_ctrl, Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13] of stall_ctrl, Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
	    (drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] of vinst_ctrl, Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
	    (drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	/* Apply kernel/user-space exclusion last, on top of the above. */
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
  366. static ssize_t pe_show(struct device *dev,
  367. struct device_attribute *attr,
  368. char *buf)
  369. {
  370. unsigned long val;
  371. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  372. struct etmv4_config *config = &drvdata->config;
  373. val = config->pe_sel;
  374. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  375. }
  376. static ssize_t pe_store(struct device *dev,
  377. struct device_attribute *attr,
  378. const char *buf, size_t size)
  379. {
  380. unsigned long val;
  381. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  382. struct etmv4_config *config = &drvdata->config;
  383. if (kstrtoul(buf, 16, &val))
  384. return -EINVAL;
  385. spin_lock(&drvdata->spinlock);
  386. if (val > drvdata->nr_pe) {
  387. spin_unlock(&drvdata->spinlock);
  388. return -EINVAL;
  389. }
  390. config->pe_sel = val;
  391. spin_unlock(&drvdata->spinlock);
  392. return size;
  393. }
  394. static DEVICE_ATTR_RW(pe);
  395. static ssize_t event_show(struct device *dev,
  396. struct device_attribute *attr,
  397. char *buf)
  398. {
  399. unsigned long val;
  400. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  401. struct etmv4_config *config = &drvdata->config;
  402. val = config->eventctrl0;
  403. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  404. }
  405. static ssize_t event_store(struct device *dev,
  406. struct device_attribute *attr,
  407. const char *buf, size_t size)
  408. {
  409. unsigned long val;
  410. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  411. struct etmv4_config *config = &drvdata->config;
  412. if (kstrtoul(buf, 16, &val))
  413. return -EINVAL;
  414. spin_lock(&drvdata->spinlock);
  415. switch (drvdata->nr_event) {
  416. case 0x0:
  417. /* EVENT0, bits[7:0] */
  418. config->eventctrl0 = val & 0xFF;
  419. break;
  420. case 0x1:
  421. /* EVENT1, bits[15:8] */
  422. config->eventctrl0 = val & 0xFFFF;
  423. break;
  424. case 0x2:
  425. /* EVENT2, bits[23:16] */
  426. config->eventctrl0 = val & 0xFFFFFF;
  427. break;
  428. case 0x3:
  429. /* EVENT3, bits[31:24] */
  430. config->eventctrl0 = val;
  431. break;
  432. default:
  433. break;
  434. }
  435. spin_unlock(&drvdata->spinlock);
  436. return size;
  437. }
  438. static DEVICE_ATTR_RW(event);
  439. static ssize_t event_instren_show(struct device *dev,
  440. struct device_attribute *attr,
  441. char *buf)
  442. {
  443. unsigned long val;
  444. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  445. struct etmv4_config *config = &drvdata->config;
  446. val = BMVAL(config->eventctrl1, 0, 3);
  447. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  448. }
  449. static ssize_t event_instren_store(struct device *dev,
  450. struct device_attribute *attr,
  451. const char *buf, size_t size)
  452. {
  453. unsigned long val;
  454. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  455. struct etmv4_config *config = &drvdata->config;
  456. if (kstrtoul(buf, 16, &val))
  457. return -EINVAL;
  458. spin_lock(&drvdata->spinlock);
  459. /* start by clearing all instruction event enable bits */
  460. config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
  461. switch (drvdata->nr_event) {
  462. case 0x0:
  463. /* generate Event element for event 1 */
  464. config->eventctrl1 |= val & BIT(1);
  465. break;
  466. case 0x1:
  467. /* generate Event element for event 1 and 2 */
  468. config->eventctrl1 |= val & (BIT(0) | BIT(1));
  469. break;
  470. case 0x2:
  471. /* generate Event element for event 1, 2 and 3 */
  472. config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
  473. break;
  474. case 0x3:
  475. /* generate Event element for all 4 events */
  476. config->eventctrl1 |= val & 0xF;
  477. break;
  478. default:
  479. break;
  480. }
  481. spin_unlock(&drvdata->spinlock);
  482. return size;
  483. }
  484. static DEVICE_ATTR_RW(event_instren);
  485. static ssize_t event_ts_show(struct device *dev,
  486. struct device_attribute *attr,
  487. char *buf)
  488. {
  489. unsigned long val;
  490. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  491. struct etmv4_config *config = &drvdata->config;
  492. val = config->ts_ctrl;
  493. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  494. }
  495. static ssize_t event_ts_store(struct device *dev,
  496. struct device_attribute *attr,
  497. const char *buf, size_t size)
  498. {
  499. unsigned long val;
  500. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  501. struct etmv4_config *config = &drvdata->config;
  502. if (kstrtoul(buf, 16, &val))
  503. return -EINVAL;
  504. if (!drvdata->ts_size)
  505. return -EINVAL;
  506. config->ts_ctrl = val & ETMv4_EVENT_MASK;
  507. return size;
  508. }
  509. static DEVICE_ATTR_RW(event_ts);
  510. static ssize_t syncfreq_show(struct device *dev,
  511. struct device_attribute *attr,
  512. char *buf)
  513. {
  514. unsigned long val;
  515. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  516. struct etmv4_config *config = &drvdata->config;
  517. val = config->syncfreq;
  518. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  519. }
  520. static ssize_t syncfreq_store(struct device *dev,
  521. struct device_attribute *attr,
  522. const char *buf, size_t size)
  523. {
  524. unsigned long val;
  525. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  526. struct etmv4_config *config = &drvdata->config;
  527. if (kstrtoul(buf, 16, &val))
  528. return -EINVAL;
  529. if (drvdata->syncpr == true)
  530. return -EINVAL;
  531. config->syncfreq = val & ETMv4_SYNC_MASK;
  532. return size;
  533. }
  534. static DEVICE_ATTR_RW(syncfreq);
  535. static ssize_t cyc_threshold_show(struct device *dev,
  536. struct device_attribute *attr,
  537. char *buf)
  538. {
  539. unsigned long val;
  540. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  541. struct etmv4_config *config = &drvdata->config;
  542. val = config->ccctlr;
  543. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  544. }
  545. static ssize_t cyc_threshold_store(struct device *dev,
  546. struct device_attribute *attr,
  547. const char *buf, size_t size)
  548. {
  549. unsigned long val;
  550. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  551. struct etmv4_config *config = &drvdata->config;
  552. if (kstrtoul(buf, 16, &val))
  553. return -EINVAL;
  554. if (val < drvdata->ccitmin)
  555. return -EINVAL;
  556. config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
  557. return size;
  558. }
  559. static DEVICE_ATTR_RW(cyc_threshold);
  560. static ssize_t bb_ctrl_show(struct device *dev,
  561. struct device_attribute *attr,
  562. char *buf)
  563. {
  564. unsigned long val;
  565. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  566. struct etmv4_config *config = &drvdata->config;
  567. val = config->bb_ctrl;
  568. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  569. }
  570. static ssize_t bb_ctrl_store(struct device *dev,
  571. struct device_attribute *attr,
  572. const char *buf, size_t size)
  573. {
  574. unsigned long val;
  575. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  576. struct etmv4_config *config = &drvdata->config;
  577. if (kstrtoul(buf, 16, &val))
  578. return -EINVAL;
  579. if (drvdata->trcbb == false)
  580. return -EINVAL;
  581. if (!drvdata->nr_addr_cmp)
  582. return -EINVAL;
  583. /*
  584. * Bit[7:0] selects which address range comparator is used for
  585. * branch broadcast control.
  586. */
  587. if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
  588. return -EINVAL;
  589. config->bb_ctrl = val;
  590. return size;
  591. }
  592. static DEVICE_ATTR_RW(bb_ctrl);
  593. static ssize_t event_vinst_show(struct device *dev,
  594. struct device_attribute *attr,
  595. char *buf)
  596. {
  597. unsigned long val;
  598. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  599. struct etmv4_config *config = &drvdata->config;
  600. val = config->vinst_ctrl & ETMv4_EVENT_MASK;
  601. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  602. }
  603. static ssize_t event_vinst_store(struct device *dev,
  604. struct device_attribute *attr,
  605. const char *buf, size_t size)
  606. {
  607. unsigned long val;
  608. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  609. struct etmv4_config *config = &drvdata->config;
  610. if (kstrtoul(buf, 16, &val))
  611. return -EINVAL;
  612. spin_lock(&drvdata->spinlock);
  613. val &= ETMv4_EVENT_MASK;
  614. config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
  615. config->vinst_ctrl |= val;
  616. spin_unlock(&drvdata->spinlock);
  617. return size;
  618. }
  619. static DEVICE_ATTR_RW(event_vinst);
  620. static ssize_t s_exlevel_vinst_show(struct device *dev,
  621. struct device_attribute *attr,
  622. char *buf)
  623. {
  624. unsigned long val;
  625. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  626. struct etmv4_config *config = &drvdata->config;
  627. val = BMVAL(config->vinst_ctrl, 16, 19);
  628. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  629. }
  630. static ssize_t s_exlevel_vinst_store(struct device *dev,
  631. struct device_attribute *attr,
  632. const char *buf, size_t size)
  633. {
  634. unsigned long val;
  635. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  636. struct etmv4_config *config = &drvdata->config;
  637. if (kstrtoul(buf, 16, &val))
  638. return -EINVAL;
  639. spin_lock(&drvdata->spinlock);
  640. /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
  641. config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
  642. /* enable instruction tracing for corresponding exception level */
  643. val &= drvdata->s_ex_level;
  644. config->vinst_ctrl |= (val << 16);
  645. spin_unlock(&drvdata->spinlock);
  646. return size;
  647. }
  648. static DEVICE_ATTR_RW(s_exlevel_vinst);
  649. static ssize_t ns_exlevel_vinst_show(struct device *dev,
  650. struct device_attribute *attr,
  651. char *buf)
  652. {
  653. unsigned long val;
  654. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  655. struct etmv4_config *config = &drvdata->config;
  656. /* EXLEVEL_NS, bits[23:20] */
  657. val = BMVAL(config->vinst_ctrl, 20, 23);
  658. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  659. }
  660. static ssize_t ns_exlevel_vinst_store(struct device *dev,
  661. struct device_attribute *attr,
  662. const char *buf, size_t size)
  663. {
  664. unsigned long val;
  665. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  666. struct etmv4_config *config = &drvdata->config;
  667. if (kstrtoul(buf, 16, &val))
  668. return -EINVAL;
  669. spin_lock(&drvdata->spinlock);
  670. /* clear EXLEVEL_NS bits (bit[23] is never implemented */
  671. config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
  672. /* enable instruction tracing for corresponding exception level */
  673. val &= drvdata->ns_ex_level;
  674. config->vinst_ctrl |= (val << 20);
  675. spin_unlock(&drvdata->spinlock);
  676. return size;
  677. }
  678. static DEVICE_ATTR_RW(ns_exlevel_vinst);
  679. static ssize_t addr_idx_show(struct device *dev,
  680. struct device_attribute *attr,
  681. char *buf)
  682. {
  683. unsigned long val;
  684. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  685. struct etmv4_config *config = &drvdata->config;
  686. val = config->addr_idx;
  687. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  688. }
  689. static ssize_t addr_idx_store(struct device *dev,
  690. struct device_attribute *attr,
  691. const char *buf, size_t size)
  692. {
  693. unsigned long val;
  694. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  695. struct etmv4_config *config = &drvdata->config;
  696. if (kstrtoul(buf, 16, &val))
  697. return -EINVAL;
  698. if (val >= drvdata->nr_addr_cmp * 2)
  699. return -EINVAL;
  700. /*
  701. * Use spinlock to ensure index doesn't change while it gets
  702. * dereferenced multiple times within a spinlock block elsewhere.
  703. */
  704. spin_lock(&drvdata->spinlock);
  705. config->addr_idx = val;
  706. spin_unlock(&drvdata->spinlock);
  707. return size;
  708. }
  709. static DEVICE_ATTR_RW(addr_idx);
  710. static ssize_t addr_instdatatype_show(struct device *dev,
  711. struct device_attribute *attr,
  712. char *buf)
  713. {
  714. ssize_t len;
  715. u8 val, idx;
  716. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  717. struct etmv4_config *config = &drvdata->config;
  718. spin_lock(&drvdata->spinlock);
  719. idx = config->addr_idx;
  720. val = BMVAL(config->addr_acc[idx], 0, 1);
  721. len = scnprintf(buf, PAGE_SIZE, "%s\n",
  722. val == ETM_INSTR_ADDR ? "instr" :
  723. (val == ETM_DATA_LOAD_ADDR ? "data_load" :
  724. (val == ETM_DATA_STORE_ADDR ? "data_store" :
  725. "data_load_store")));
  726. spin_unlock(&drvdata->spinlock);
  727. return len;
  728. }
  729. static ssize_t addr_instdatatype_store(struct device *dev,
  730. struct device_attribute *attr,
  731. const char *buf, size_t size)
  732. {
  733. u8 idx;
  734. char str[20] = "";
  735. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  736. struct etmv4_config *config = &drvdata->config;
  737. if (strlen(buf) >= 20)
  738. return -EINVAL;
  739. if (sscanf(buf, "%s", str) != 1)
  740. return -EINVAL;
  741. spin_lock(&drvdata->spinlock);
  742. idx = config->addr_idx;
  743. if (!strcmp(str, "instr"))
  744. /* TYPE, bits[1:0] */
  745. config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
  746. spin_unlock(&drvdata->spinlock);
  747. return size;
  748. }
  749. static DEVICE_ATTR_RW(addr_instdatatype);
  750. static ssize_t addr_single_show(struct device *dev,
  751. struct device_attribute *attr,
  752. char *buf)
  753. {
  754. u8 idx;
  755. unsigned long val;
  756. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  757. struct etmv4_config *config = &drvdata->config;
  758. idx = config->addr_idx;
  759. spin_lock(&drvdata->spinlock);
  760. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  761. config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  762. spin_unlock(&drvdata->spinlock);
  763. return -EPERM;
  764. }
  765. val = (unsigned long)config->addr_val[idx];
  766. spin_unlock(&drvdata->spinlock);
  767. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  768. }
  769. static ssize_t addr_single_store(struct device *dev,
  770. struct device_attribute *attr,
  771. const char *buf, size_t size)
  772. {
  773. u8 idx;
  774. unsigned long val;
  775. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  776. struct etmv4_config *config = &drvdata->config;
  777. if (kstrtoul(buf, 16, &val))
  778. return -EINVAL;
  779. spin_lock(&drvdata->spinlock);
  780. idx = config->addr_idx;
  781. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  782. config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
  783. spin_unlock(&drvdata->spinlock);
  784. return -EPERM;
  785. }
  786. config->addr_val[idx] = (u64)val;
  787. config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
  788. spin_unlock(&drvdata->spinlock);
  789. return size;
  790. }
  791. static DEVICE_ATTR_RW(addr_single);
  792. static ssize_t addr_range_show(struct device *dev,
  793. struct device_attribute *attr,
  794. char *buf)
  795. {
  796. u8 idx;
  797. unsigned long val1, val2;
  798. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  799. struct etmv4_config *config = &drvdata->config;
  800. spin_lock(&drvdata->spinlock);
  801. idx = config->addr_idx;
  802. if (idx % 2 != 0) {
  803. spin_unlock(&drvdata->spinlock);
  804. return -EPERM;
  805. }
  806. if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
  807. config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
  808. (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
  809. config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
  810. spin_unlock(&drvdata->spinlock);
  811. return -EPERM;
  812. }
  813. val1 = (unsigned long)config->addr_val[idx];
  814. val2 = (unsigned long)config->addr_val[idx + 1];
  815. spin_unlock(&drvdata->spinlock);
  816. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  817. }
/*
 * Program an address range filter: "<start> <stop>" (hex) written into the
 * even/odd comparator pair currently selected by addr_idx.
 */
static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* a range must start on the even comparator of a pair */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	/* both comparators of the pair must be free or already a range */
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
  860. static ssize_t addr_start_show(struct device *dev,
  861. struct device_attribute *attr,
  862. char *buf)
  863. {
  864. u8 idx;
  865. unsigned long val;
  866. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  867. struct etmv4_config *config = &drvdata->config;
  868. spin_lock(&drvdata->spinlock);
  869. idx = config->addr_idx;
  870. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  871. config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  872. spin_unlock(&drvdata->spinlock);
  873. return -EPERM;
  874. }
  875. val = (unsigned long)config->addr_val[idx];
  876. spin_unlock(&drvdata->spinlock);
  877. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  878. }
  879. static ssize_t addr_start_store(struct device *dev,
  880. struct device_attribute *attr,
  881. const char *buf, size_t size)
  882. {
  883. u8 idx;
  884. unsigned long val;
  885. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  886. struct etmv4_config *config = &drvdata->config;
  887. if (kstrtoul(buf, 16, &val))
  888. return -EINVAL;
  889. spin_lock(&drvdata->spinlock);
  890. idx = config->addr_idx;
  891. if (!drvdata->nr_addr_cmp) {
  892. spin_unlock(&drvdata->spinlock);
  893. return -EINVAL;
  894. }
  895. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  896. config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
  897. spin_unlock(&drvdata->spinlock);
  898. return -EPERM;
  899. }
  900. config->addr_val[idx] = (u64)val;
  901. config->addr_type[idx] = ETM_ADDR_TYPE_START;
  902. config->vissctlr |= BIT(idx);
  903. /* SSSTATUS, bit[9] - turn on start/stop logic */
  904. config->vinst_ctrl |= BIT(9);
  905. spin_unlock(&drvdata->spinlock);
  906. return size;
  907. }
  908. static DEVICE_ATTR_RW(addr_start);
  909. static ssize_t addr_stop_show(struct device *dev,
  910. struct device_attribute *attr,
  911. char *buf)
  912. {
  913. u8 idx;
  914. unsigned long val;
  915. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  916. struct etmv4_config *config = &drvdata->config;
  917. spin_lock(&drvdata->spinlock);
  918. idx = config->addr_idx;
  919. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  920. config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  921. spin_unlock(&drvdata->spinlock);
  922. return -EPERM;
  923. }
  924. val = (unsigned long)config->addr_val[idx];
  925. spin_unlock(&drvdata->spinlock);
  926. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  927. }
  928. static ssize_t addr_stop_store(struct device *dev,
  929. struct device_attribute *attr,
  930. const char *buf, size_t size)
  931. {
  932. u8 idx;
  933. unsigned long val;
  934. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  935. struct etmv4_config *config = &drvdata->config;
  936. if (kstrtoul(buf, 16, &val))
  937. return -EINVAL;
  938. spin_lock(&drvdata->spinlock);
  939. idx = config->addr_idx;
  940. if (!drvdata->nr_addr_cmp) {
  941. spin_unlock(&drvdata->spinlock);
  942. return -EINVAL;
  943. }
  944. if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
  945. config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
  946. spin_unlock(&drvdata->spinlock);
  947. return -EPERM;
  948. }
  949. config->addr_val[idx] = (u64)val;
  950. config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
  951. config->vissctlr |= BIT(idx + 16);
  952. /* SSSTATUS, bit[9] - turn on start/stop logic */
  953. config->vinst_ctrl |= BIT(9);
  954. spin_unlock(&drvdata->spinlock);
  955. return size;
  956. }
  957. static DEVICE_ATTR_RW(addr_stop);
  958. static ssize_t addr_ctxtype_show(struct device *dev,
  959. struct device_attribute *attr,
  960. char *buf)
  961. {
  962. ssize_t len;
  963. u8 idx, val;
  964. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  965. struct etmv4_config *config = &drvdata->config;
  966. spin_lock(&drvdata->spinlock);
  967. idx = config->addr_idx;
  968. /* CONTEXTTYPE, bits[3:2] */
  969. val = BMVAL(config->addr_acc[idx], 2, 3);
  970. len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
  971. (val == ETM_CTX_CTXID ? "ctxid" :
  972. (val == ETM_CTX_VMID ? "vmid" : "all")));
  973. spin_unlock(&drvdata->spinlock);
  974. return len;
  975. }
/*
 * Choose which context comparison(s) the selected address comparator
 * performs: "none", "ctxid", "vmid" or "all" (unknown strings are ignored).
 */
static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* input must fit in the scratch buffer, including the terminator */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
  1019. static ssize_t addr_context_show(struct device *dev,
  1020. struct device_attribute *attr,
  1021. char *buf)
  1022. {
  1023. u8 idx;
  1024. unsigned long val;
  1025. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1026. struct etmv4_config *config = &drvdata->config;
  1027. spin_lock(&drvdata->spinlock);
  1028. idx = config->addr_idx;
  1029. /* context ID comparator bits[6:4] */
  1030. val = BMVAL(config->addr_acc[idx], 4, 6);
  1031. spin_unlock(&drvdata->spinlock);
  1032. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1033. }
  1034. static ssize_t addr_context_store(struct device *dev,
  1035. struct device_attribute *attr,
  1036. const char *buf, size_t size)
  1037. {
  1038. u8 idx;
  1039. unsigned long val;
  1040. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1041. struct etmv4_config *config = &drvdata->config;
  1042. if (kstrtoul(buf, 16, &val))
  1043. return -EINVAL;
  1044. if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
  1045. return -EINVAL;
  1046. if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
  1047. drvdata->numcidc : drvdata->numvmidc))
  1048. return -EINVAL;
  1049. spin_lock(&drvdata->spinlock);
  1050. idx = config->addr_idx;
  1051. /* clear context ID comparator bits[6:4] */
  1052. config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
  1053. config->addr_acc[idx] |= (val << 4);
  1054. spin_unlock(&drvdata->spinlock);
  1055. return size;
  1056. }
  1057. static DEVICE_ATTR_RW(addr_context);
  1058. static ssize_t seq_idx_show(struct device *dev,
  1059. struct device_attribute *attr,
  1060. char *buf)
  1061. {
  1062. unsigned long val;
  1063. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1064. struct etmv4_config *config = &drvdata->config;
  1065. val = config->seq_idx;
  1066. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1067. }
  1068. static ssize_t seq_idx_store(struct device *dev,
  1069. struct device_attribute *attr,
  1070. const char *buf, size_t size)
  1071. {
  1072. unsigned long val;
  1073. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1074. struct etmv4_config *config = &drvdata->config;
  1075. if (kstrtoul(buf, 16, &val))
  1076. return -EINVAL;
  1077. if (val >= drvdata->nrseqstate - 1)
  1078. return -EINVAL;
  1079. /*
  1080. * Use spinlock to ensure index doesn't change while it gets
  1081. * dereferenced multiple times within a spinlock block elsewhere.
  1082. */
  1083. spin_lock(&drvdata->spinlock);
  1084. config->seq_idx = val;
  1085. spin_unlock(&drvdata->spinlock);
  1086. return size;
  1087. }
  1088. static DEVICE_ATTR_RW(seq_idx);
  1089. static ssize_t seq_state_show(struct device *dev,
  1090. struct device_attribute *attr,
  1091. char *buf)
  1092. {
  1093. unsigned long val;
  1094. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1095. struct etmv4_config *config = &drvdata->config;
  1096. val = config->seq_state;
  1097. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1098. }
  1099. static ssize_t seq_state_store(struct device *dev,
  1100. struct device_attribute *attr,
  1101. const char *buf, size_t size)
  1102. {
  1103. unsigned long val;
  1104. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1105. struct etmv4_config *config = &drvdata->config;
  1106. if (kstrtoul(buf, 16, &val))
  1107. return -EINVAL;
  1108. if (val >= drvdata->nrseqstate)
  1109. return -EINVAL;
  1110. config->seq_state = val;
  1111. return size;
  1112. }
  1113. static DEVICE_ATTR_RW(seq_state);
  1114. static ssize_t seq_event_show(struct device *dev,
  1115. struct device_attribute *attr,
  1116. char *buf)
  1117. {
  1118. u8 idx;
  1119. unsigned long val;
  1120. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1121. struct etmv4_config *config = &drvdata->config;
  1122. spin_lock(&drvdata->spinlock);
  1123. idx = config->seq_idx;
  1124. val = config->seq_ctrl[idx];
  1125. spin_unlock(&drvdata->spinlock);
  1126. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1127. }
  1128. static ssize_t seq_event_store(struct device *dev,
  1129. struct device_attribute *attr,
  1130. const char *buf, size_t size)
  1131. {
  1132. u8 idx;
  1133. unsigned long val;
  1134. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1135. struct etmv4_config *config = &drvdata->config;
  1136. if (kstrtoul(buf, 16, &val))
  1137. return -EINVAL;
  1138. spin_lock(&drvdata->spinlock);
  1139. idx = config->seq_idx;
  1140. /* RST, bits[7:0] */
  1141. config->seq_ctrl[idx] = val & 0xFF;
  1142. spin_unlock(&drvdata->spinlock);
  1143. return size;
  1144. }
  1145. static DEVICE_ATTR_RW(seq_event);
  1146. static ssize_t seq_reset_event_show(struct device *dev,
  1147. struct device_attribute *attr,
  1148. char *buf)
  1149. {
  1150. unsigned long val;
  1151. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1152. struct etmv4_config *config = &drvdata->config;
  1153. val = config->seq_rst;
  1154. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1155. }
  1156. static ssize_t seq_reset_event_store(struct device *dev,
  1157. struct device_attribute *attr,
  1158. const char *buf, size_t size)
  1159. {
  1160. unsigned long val;
  1161. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1162. struct etmv4_config *config = &drvdata->config;
  1163. if (kstrtoul(buf, 16, &val))
  1164. return -EINVAL;
  1165. if (!(drvdata->nrseqstate))
  1166. return -EINVAL;
  1167. config->seq_rst = val & ETMv4_EVENT_MASK;
  1168. return size;
  1169. }
  1170. static DEVICE_ATTR_RW(seq_reset_event);
  1171. static ssize_t cntr_idx_show(struct device *dev,
  1172. struct device_attribute *attr,
  1173. char *buf)
  1174. {
  1175. unsigned long val;
  1176. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1177. struct etmv4_config *config = &drvdata->config;
  1178. val = config->cntr_idx;
  1179. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1180. }
  1181. static ssize_t cntr_idx_store(struct device *dev,
  1182. struct device_attribute *attr,
  1183. const char *buf, size_t size)
  1184. {
  1185. unsigned long val;
  1186. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1187. struct etmv4_config *config = &drvdata->config;
  1188. if (kstrtoul(buf, 16, &val))
  1189. return -EINVAL;
  1190. if (val >= drvdata->nr_cntr)
  1191. return -EINVAL;
  1192. /*
  1193. * Use spinlock to ensure index doesn't change while it gets
  1194. * dereferenced multiple times within a spinlock block elsewhere.
  1195. */
  1196. spin_lock(&drvdata->spinlock);
  1197. config->cntr_idx = val;
  1198. spin_unlock(&drvdata->spinlock);
  1199. return size;
  1200. }
  1201. static DEVICE_ATTR_RW(cntr_idx);
  1202. static ssize_t cntrldvr_show(struct device *dev,
  1203. struct device_attribute *attr,
  1204. char *buf)
  1205. {
  1206. u8 idx;
  1207. unsigned long val;
  1208. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1209. struct etmv4_config *config = &drvdata->config;
  1210. spin_lock(&drvdata->spinlock);
  1211. idx = config->cntr_idx;
  1212. val = config->cntrldvr[idx];
  1213. spin_unlock(&drvdata->spinlock);
  1214. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1215. }
  1216. static ssize_t cntrldvr_store(struct device *dev,
  1217. struct device_attribute *attr,
  1218. const char *buf, size_t size)
  1219. {
  1220. u8 idx;
  1221. unsigned long val;
  1222. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1223. struct etmv4_config *config = &drvdata->config;
  1224. if (kstrtoul(buf, 16, &val))
  1225. return -EINVAL;
  1226. if (val > ETM_CNTR_MAX_VAL)
  1227. return -EINVAL;
  1228. spin_lock(&drvdata->spinlock);
  1229. idx = config->cntr_idx;
  1230. config->cntrldvr[idx] = val;
  1231. spin_unlock(&drvdata->spinlock);
  1232. return size;
  1233. }
  1234. static DEVICE_ATTR_RW(cntrldvr);
  1235. static ssize_t cntr_val_show(struct device *dev,
  1236. struct device_attribute *attr,
  1237. char *buf)
  1238. {
  1239. u8 idx;
  1240. unsigned long val;
  1241. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1242. struct etmv4_config *config = &drvdata->config;
  1243. spin_lock(&drvdata->spinlock);
  1244. idx = config->cntr_idx;
  1245. val = config->cntr_val[idx];
  1246. spin_unlock(&drvdata->spinlock);
  1247. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1248. }
  1249. static ssize_t cntr_val_store(struct device *dev,
  1250. struct device_attribute *attr,
  1251. const char *buf, size_t size)
  1252. {
  1253. u8 idx;
  1254. unsigned long val;
  1255. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1256. struct etmv4_config *config = &drvdata->config;
  1257. if (kstrtoul(buf, 16, &val))
  1258. return -EINVAL;
  1259. if (val > ETM_CNTR_MAX_VAL)
  1260. return -EINVAL;
  1261. spin_lock(&drvdata->spinlock);
  1262. idx = config->cntr_idx;
  1263. config->cntr_val[idx] = val;
  1264. spin_unlock(&drvdata->spinlock);
  1265. return size;
  1266. }
  1267. static DEVICE_ATTR_RW(cntr_val);
  1268. static ssize_t cntr_ctrl_show(struct device *dev,
  1269. struct device_attribute *attr,
  1270. char *buf)
  1271. {
  1272. u8 idx;
  1273. unsigned long val;
  1274. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1275. struct etmv4_config *config = &drvdata->config;
  1276. spin_lock(&drvdata->spinlock);
  1277. idx = config->cntr_idx;
  1278. val = config->cntr_ctrl[idx];
  1279. spin_unlock(&drvdata->spinlock);
  1280. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1281. }
  1282. static ssize_t cntr_ctrl_store(struct device *dev,
  1283. struct device_attribute *attr,
  1284. const char *buf, size_t size)
  1285. {
  1286. u8 idx;
  1287. unsigned long val;
  1288. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1289. struct etmv4_config *config = &drvdata->config;
  1290. if (kstrtoul(buf, 16, &val))
  1291. return -EINVAL;
  1292. spin_lock(&drvdata->spinlock);
  1293. idx = config->cntr_idx;
  1294. config->cntr_ctrl[idx] = val;
  1295. spin_unlock(&drvdata->spinlock);
  1296. return size;
  1297. }
  1298. static DEVICE_ATTR_RW(cntr_ctrl);
  1299. static ssize_t res_idx_show(struct device *dev,
  1300. struct device_attribute *attr,
  1301. char *buf)
  1302. {
  1303. unsigned long val;
  1304. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1305. struct etmv4_config *config = &drvdata->config;
  1306. val = config->res_idx;
  1307. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1308. }
  1309. static ssize_t res_idx_store(struct device *dev,
  1310. struct device_attribute *attr,
  1311. const char *buf, size_t size)
  1312. {
  1313. unsigned long val;
  1314. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1315. struct etmv4_config *config = &drvdata->config;
  1316. if (kstrtoul(buf, 16, &val))
  1317. return -EINVAL;
  1318. /* Resource selector pair 0 is always implemented and reserved */
  1319. if ((val == 0) || (val >= drvdata->nr_resource))
  1320. return -EINVAL;
  1321. /*
  1322. * Use spinlock to ensure index doesn't change while it gets
  1323. * dereferenced multiple times within a spinlock block elsewhere.
  1324. */
  1325. spin_lock(&drvdata->spinlock);
  1326. config->res_idx = val;
  1327. spin_unlock(&drvdata->spinlock);
  1328. return size;
  1329. }
  1330. static DEVICE_ATTR_RW(res_idx);
  1331. static ssize_t res_ctrl_show(struct device *dev,
  1332. struct device_attribute *attr,
  1333. char *buf)
  1334. {
  1335. u8 idx;
  1336. unsigned long val;
  1337. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1338. struct etmv4_config *config = &drvdata->config;
  1339. spin_lock(&drvdata->spinlock);
  1340. idx = config->res_idx;
  1341. val = config->res_ctrl[idx];
  1342. spin_unlock(&drvdata->spinlock);
  1343. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1344. }
  1345. static ssize_t res_ctrl_store(struct device *dev,
  1346. struct device_attribute *attr,
  1347. const char *buf, size_t size)
  1348. {
  1349. u8 idx;
  1350. unsigned long val;
  1351. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1352. struct etmv4_config *config = &drvdata->config;
  1353. if (kstrtoul(buf, 16, &val))
  1354. return -EINVAL;
  1355. spin_lock(&drvdata->spinlock);
  1356. idx = config->res_idx;
  1357. /* For odd idx pair inversal bit is RES0 */
  1358. if (idx % 2 != 0)
  1359. /* PAIRINV, bit[21] */
  1360. val &= ~BIT(21);
  1361. config->res_ctrl[idx] = val;
  1362. spin_unlock(&drvdata->spinlock);
  1363. return size;
  1364. }
  1365. static DEVICE_ATTR_RW(res_ctrl);
  1366. static ssize_t ctxid_idx_show(struct device *dev,
  1367. struct device_attribute *attr,
  1368. char *buf)
  1369. {
  1370. unsigned long val;
  1371. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1372. struct etmv4_config *config = &drvdata->config;
  1373. val = config->ctxid_idx;
  1374. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1375. }
  1376. static ssize_t ctxid_idx_store(struct device *dev,
  1377. struct device_attribute *attr,
  1378. const char *buf, size_t size)
  1379. {
  1380. unsigned long val;
  1381. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1382. struct etmv4_config *config = &drvdata->config;
  1383. if (kstrtoul(buf, 16, &val))
  1384. return -EINVAL;
  1385. if (val >= drvdata->numcidc)
  1386. return -EINVAL;
  1387. /*
  1388. * Use spinlock to ensure index doesn't change while it gets
  1389. * dereferenced multiple times within a spinlock block elsewhere.
  1390. */
  1391. spin_lock(&drvdata->spinlock);
  1392. config->ctxid_idx = val;
  1393. spin_unlock(&drvdata->spinlock);
  1394. return size;
  1395. }
  1396. static DEVICE_ATTR_RW(ctxid_idx);
  1397. static ssize_t ctxid_pid_show(struct device *dev,
  1398. struct device_attribute *attr,
  1399. char *buf)
  1400. {
  1401. u8 idx;
  1402. unsigned long val;
  1403. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1404. struct etmv4_config *config = &drvdata->config;
  1405. /*
  1406. * Don't use contextID tracing if coming from a PID namespace. See
  1407. * comment in ctxid_pid_store().
  1408. */
  1409. if (task_active_pid_ns(current) != &init_pid_ns)
  1410. return -EINVAL;
  1411. spin_lock(&drvdata->spinlock);
  1412. idx = config->ctxid_idx;
  1413. val = (unsigned long)config->ctxid_pid[idx];
  1414. spin_unlock(&drvdata->spinlock);
  1415. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1416. }
  1417. static ssize_t ctxid_pid_store(struct device *dev,
  1418. struct device_attribute *attr,
  1419. const char *buf, size_t size)
  1420. {
  1421. u8 idx;
  1422. unsigned long pid;
  1423. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1424. struct etmv4_config *config = &drvdata->config;
  1425. /*
  1426. * When contextID tracing is enabled the tracers will insert the
  1427. * value found in the contextID register in the trace stream. But if
  1428. * a process is in a namespace the PID of that process as seen from the
  1429. * namespace won't be what the kernel sees, something that makes the
  1430. * feature confusing and can potentially leak kernel only information.
  1431. * As such refuse to use the feature if @current is not in the initial
  1432. * PID namespace.
  1433. */
  1434. if (task_active_pid_ns(current) != &init_pid_ns)
  1435. return -EINVAL;
  1436. /*
  1437. * only implemented when ctxid tracing is enabled, i.e. at least one
  1438. * ctxid comparator is implemented and ctxid is greater than 0 bits
  1439. * in length
  1440. */
  1441. if (!drvdata->ctxid_size || !drvdata->numcidc)
  1442. return -EINVAL;
  1443. if (kstrtoul(buf, 16, &pid))
  1444. return -EINVAL;
  1445. spin_lock(&drvdata->spinlock);
  1446. idx = config->ctxid_idx;
  1447. config->ctxid_pid[idx] = (u64)pid;
  1448. spin_unlock(&drvdata->spinlock);
  1449. return size;
  1450. }
  1451. static DEVICE_ATTR_RW(ctxid_pid);
  1452. static ssize_t ctxid_masks_show(struct device *dev,
  1453. struct device_attribute *attr,
  1454. char *buf)
  1455. {
  1456. unsigned long val1, val2;
  1457. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1458. struct etmv4_config *config = &drvdata->config;
  1459. /*
  1460. * Don't use contextID tracing if coming from a PID namespace. See
  1461. * comment in ctxid_pid_store().
  1462. */
  1463. if (task_active_pid_ns(current) != &init_pid_ns)
  1464. return -EINVAL;
  1465. spin_lock(&drvdata->spinlock);
  1466. val1 = config->ctxid_mask0;
  1467. val2 = config->ctxid_mask1;
  1468. spin_unlock(&drvdata->spinlock);
  1469. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1470. }
/*
 * Program the two context ID comparator mask registers from a
 * "<mask0> <mask1>" (hex) pair, then clear the comparator value bytes
 * that the new masks cover.
 */
static ssize_t ctxid_masks_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Don't use contextID tracing if coming from a PID namespace. See
	 * comment in ctxid_pid_store().
	 */
	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
  1569. static ssize_t vmid_idx_show(struct device *dev,
  1570. struct device_attribute *attr,
  1571. char *buf)
  1572. {
  1573. unsigned long val;
  1574. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1575. struct etmv4_config *config = &drvdata->config;
  1576. val = config->vmid_idx;
  1577. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1578. }
  1579. static ssize_t vmid_idx_store(struct device *dev,
  1580. struct device_attribute *attr,
  1581. const char *buf, size_t size)
  1582. {
  1583. unsigned long val;
  1584. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1585. struct etmv4_config *config = &drvdata->config;
  1586. if (kstrtoul(buf, 16, &val))
  1587. return -EINVAL;
  1588. if (val >= drvdata->numvmidc)
  1589. return -EINVAL;
  1590. /*
  1591. * Use spinlock to ensure index doesn't change while it gets
  1592. * dereferenced multiple times within a spinlock block elsewhere.
  1593. */
  1594. spin_lock(&drvdata->spinlock);
  1595. config->vmid_idx = val;
  1596. spin_unlock(&drvdata->spinlock);
  1597. return size;
  1598. }
  1599. static DEVICE_ATTR_RW(vmid_idx);
  1600. static ssize_t vmid_val_show(struct device *dev,
  1601. struct device_attribute *attr,
  1602. char *buf)
  1603. {
  1604. unsigned long val;
  1605. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1606. struct etmv4_config *config = &drvdata->config;
  1607. val = (unsigned long)config->vmid_val[config->vmid_idx];
  1608. return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
  1609. }
  1610. static ssize_t vmid_val_store(struct device *dev,
  1611. struct device_attribute *attr,
  1612. const char *buf, size_t size)
  1613. {
  1614. unsigned long val;
  1615. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1616. struct etmv4_config *config = &drvdata->config;
  1617. /*
  1618. * only implemented when vmid tracing is enabled, i.e. at least one
  1619. * vmid comparator is implemented and at least 8 bit vmid size
  1620. */
  1621. if (!drvdata->vmid_size || !drvdata->numvmidc)
  1622. return -EINVAL;
  1623. if (kstrtoul(buf, 16, &val))
  1624. return -EINVAL;
  1625. spin_lock(&drvdata->spinlock);
  1626. config->vmid_val[config->vmid_idx] = (u64)val;
  1627. spin_unlock(&drvdata->spinlock);
  1628. return size;
  1629. }
  1630. static DEVICE_ATTR_RW(vmid_val);
  1631. static ssize_t vmid_masks_show(struct device *dev,
  1632. struct device_attribute *attr, char *buf)
  1633. {
  1634. unsigned long val1, val2;
  1635. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1636. struct etmv4_config *config = &drvdata->config;
  1637. spin_lock(&drvdata->spinlock);
  1638. val1 = config->vmid_mask0;
  1639. val2 = config->vmid_mask1;
  1640. spin_unlock(&drvdata->spinlock);
  1641. return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
  1642. }
/*
 * vmid_masks_store - program the byte-enable masks for the VMID comparators.
 *
 * Expects two hex words: the first masks comparators 0-3 (one byte per
 * comparator), the second masks comparators 4-7.  Bytes above the number
 * of implemented comparators are forced to zero.  Any comparator value
 * byte covered by a set mask bit is cleared, as required by the
 * architecture (masked bytes must be programmed to 0x0).
 */
static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	/* Both words must be supplied, even when fewer comparators exist. */
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]; keep only the bytes that correspond to
	 * implemented comparators.
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
	 * of vmid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/*
		 * mask value of corresponding vmid comparator
		 * (NOTE(review): ETMv4_EVENT_MASK is reused here as a
		 * low-byte extractor — presumably 0xFF; verify in etm4x.h)
		 */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);

	return size;
}
  1733. static DEVICE_ATTR_RW(vmid_masks);
  1734. static ssize_t cpu_show(struct device *dev,
  1735. struct device_attribute *attr, char *buf)
  1736. {
  1737. int val;
  1738. struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
  1739. val = drvdata->cpu;
  1740. return scnprintf(buf, PAGE_SIZE, "%d\n", val);
  1741. }
  1742. static DEVICE_ATTR_RO(cpu);
/*
 * Top-level sysfs attributes for the ETMv4: implemented-resource counts,
 * trace configuration knobs and the comparator programming interface.
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
/* Carrier for a cross-CPU register read: address in, value out. */
struct etmv4_reg {
	void __iomem *addr;	/* MMIO address to read */
	u32 data;		/* value read back by the remote CPU */
};
/* IPI callback: runs on the target CPU and performs the actual read. */
static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}
/*
 * Read an ETMv4 register at @offset on the CPU the tracer is affine to.
 * The synchronous cross call guarantees the access executes on
 * drvdata->cpu rather than on the CPU running the sysfs read.
 */
static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
/* Attribute for a register readable directly through drvdata->base. */
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

/*
 * Attribute for a register that must be read on the owning CPU,
 * routed through etmv4_cross_read().
 */
#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

/* Management registers accessible from any CPU. */
coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);

/* Registers that require the trace core to be powered up to read. */
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
/* Attributes exposed under the "mgmt" sysfs subdirectory. */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
/* ID registers describing the implemented ETMv4 features (cross-CPU read). */
coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);
/* Attributes exposed under the "trcidr" sysfs subdirectory. */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
/* Default (unnamed) group: configuration attributes at the device root. */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" subdirectory: management and identification registers. */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" subdirectory: feature ID registers. */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated group list registered with the coresight core. */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};