coresight-etm3x-sysfs.c

/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
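
/*
 * All the show/store handlers below run on the coresight device
 * registered for this ETM; its parent is the device that owns the
 * driver data, hence drvdata is always looked up via dev->parent.
 */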
static ssize_t nr_addr_cmp_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		memset(config, 0, sizeof(struct etm_config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (i = 0; i < drvdata->nr_addr_cmp; i++)
			config->addr_type[i] = ETM_ADDR_TYPE_NONE;

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

	if (config->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else
		config->ctrl &= ~ETMCR_STALL_MODE;

	if (config->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);
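
/*
 * The attributes that follow only update the cached state in
 * drvdata->config; the driver programs these values into the ETM
 * registers when tracing is enabled.
 */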

static ssize_t trigger_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->trigger_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->enable_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->fifofull_level = val;
	return size;
}
static DEVICE_ATTR_RW(fifofull_level);
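
/*
 * Address comparator attributes: addr_idx selects the comparator that
 * the other addr_* files below operate on.
 */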

static ssize_t addr_idx_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_single_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = config->addr_val[idx];
	val2 = config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= (1 << (idx/2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->startstop_ctrl |= (1 << idx);
	config->enable_ctrl1 |= BIT(25);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->startstop_ctrl |= (1 << (idx + 16));
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->addr_acctype[config->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->addr_acctype[config->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);
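
/*
 * Counter attributes: cntr_idx selects the counter that the cntr_*
 * files below operate on.
 */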

static ssize_t cntr_idx_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);

static ssize_t cntr_val_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		spin_lock(&drvdata->spinlock);
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf + ret, "counter %d: %x\n",
				       i, config->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);
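
/*
 * Sequencer attributes: seq_XY_event holds the event that moves the
 * sequencer from state X to state Y, and seq_curr_state reports the
 * current state.
 */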

static ssize_t seq_12_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);

static ssize_t seq_curr_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	config->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
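
/*
 * Context ID comparator attributes: ctxid_idx selects the comparator,
 * ctxid_pid takes a virtual PID and stores both it and the translated
 * PID, and ctxid_mask applies to the context ID comparison.
 */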

static ssize_t ctxid_idx_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_vpid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long vpid, pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &vpid);
	if (ret)
		return ret;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	config->ctxid_vpid[config->ctxid_idx] = vpid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);

static ssize_t sync_freq_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);

static ssize_t cpu_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static ssize_t traceid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = etm_get_trace_id(drvdata);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);

static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};
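
/*
 * Management registers are exposed read-only and unmodified: each
 * file below simply dumps the register found at the given offset.
 */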
#define coresight_simple_func(name, offset)				\
static ssize_t name##_show(struct device *_dev,				\
		struct device_attribute *attr, char *buf)		\
{									\
	struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent);	\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
DEVICE_ATTR_RO(name)

coresight_simple_func(etmccr, ETMCCR);
coresight_simple_func(etmccer, ETMCCER);
coresight_simple_func(etmscr, ETMSCR);
coresight_simple_func(etmidr, ETMIDR);
coresight_simple_func(etmcr, ETMCR);
coresight_simple_func(etmtraceidr, ETMTRACEIDR);
coresight_simple_func(etmteevr, ETMTEEVR);
coresight_simple_func(etmtssvr, ETMTSSCR);
coresight_simple_func(etmtecr1, ETMTECR1);
coresight_simple_func(etmtecr2, ETMTECR2);

static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};
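
/*
 * Both groups are handed to the coresight core when this ETM is
 * registered.  The files typically appear under
 * /sys/bus/coresight/devices/<etm-name>/, with the management
 * registers in the mgmt/ sub-directory (the exact device name is
 * platform dependent), e.g.:
 *
 *	cat /sys/bus/coresight/devices/<etm-name>/mgmt/etmidr
 */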
const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};