coresight-etm3x-sysfs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm.h"
#include "coresight-priv.h"

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ctxid_cmp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ctxid_cmp;
	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ctxid_cmp);

static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);
static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i, ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val) {
		spin_lock(&drvdata->spinlock);
		memset(config, 0, sizeof(struct etm_config));
		config->mode = ETM_MODE_EXCLUDE;
		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
		for (i = 0; i < drvdata->nr_addr_cmp; i++) {
			config->addr_type[i] = ETM_ADDR_TYPE_NONE;
		}

		etm_set_default(config);
		spin_unlock(&drvdata->spinlock);
	}

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->mode;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETM_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		config->enable_ctrl1 |= ETMTECR1_INC_EXC;
	else
		config->enable_ctrl1 &= ~ETMTECR1_INC_EXC;

	if (config->mode & ETM_MODE_CYCACC)
		config->ctrl |= ETMCR_CYC_ACC;
	else
		config->ctrl &= ~ETMCR_CYC_ACC;

	if (config->mode & ETM_MODE_STALL) {
		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
			dev_warn(drvdata->dev, "stall mode not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_STALL_MODE;
	} else
		config->ctrl &= ~ETMCR_STALL_MODE;

	if (config->mode & ETM_MODE_TIMESTAMP) {
		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
			dev_warn(drvdata->dev, "timestamp not supported\n");
			ret = -EINVAL;
			goto err_unlock;
		}
		config->ctrl |= ETMCR_TIMESTAMP_EN;
	} else
		config->ctrl &= ~ETMCR_TIMESTAMP_EN;

	if (config->mode & ETM_MODE_CTXID)
		config->ctrl |= ETMCR_CTXID_SIZE;
	else
		config->ctrl &= ~ETMCR_CTXID_SIZE;

	if (config->mode & ETM_MODE_BBROAD)
		config->ctrl |= ETMCR_BRANCH_BROADCAST;
	else
		config->ctrl &= ~ETMCR_BRANCH_BROADCAST;

	if (config->mode & ETM_MODE_RET_STACK)
		config->ctrl |= ETMCR_RETURN_STACK;
	else
		config->ctrl &= ~ETMCR_RETURN_STACK;

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;

err_unlock:
	spin_unlock(&drvdata->spinlock);
	return ret;
}
static DEVICE_ATTR_RW(mode);
static ssize_t trigger_event_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->trigger_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_event_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->trigger_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(trigger_event);

static ssize_t enable_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->enable_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t enable_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->enable_event = val & ETM_EVENT_MASK;

	return size;
}
static DEVICE_ATTR_RW(enable_event);

static ssize_t fifofull_level_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->fifofull_level;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t fifofull_level_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->fifofull_level = val;

	return size;
}
static DEVICE_ATTR_RW(fifofull_level);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->addr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_addr_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_idx);
static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = config->addr_val[idx];
	val2 = config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* Lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	config->enable_ctrl1 |= (1 << (idx / 2));
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_range);
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->startstop_ctrl |= (1 << idx);
	config->enable_ctrl1 |= BIT(25);
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	u8 idx;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->startstop_ctrl |= (1 << (idx + 16));
	config->enable_ctrl1 |= ETMTECR1_START_STOP;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_acctype_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->addr_acctype[config->addr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t addr_acctype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->addr_acctype[config->addr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(addr_acctype);
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->cntr_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntr_rld_val_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_val[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_val_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_val);

static ssize_t cntr_event_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_event_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_event);

static ssize_t cntr_rld_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->cntr_rld_event[config->cntr_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t cntr_rld_event_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_rld_event);
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int i, ret = 0;
	u32 val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		spin_lock(&drvdata->spinlock);
		/* Append at the current offset so earlier lines aren't overwritten. */
		for (i = 0; i < drvdata->nr_cntr; i++)
			ret += sprintf(buf + ret, "counter %d: %x\n",
				       i, config->cntr_val[i]);
		spin_unlock(&drvdata->spinlock);
		return ret;
	}

	for (i = 0; i < drvdata->nr_cntr; i++) {
		val = etm_readl(drvdata, ETMCNTVRn(i));
		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
	}

	return ret;
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	config->cntr_val[config->cntr_idx] = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(cntr_val);
static ssize_t seq_12_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_12_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_12_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_12_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_12_event);

static ssize_t seq_21_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_21_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_21_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_21_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_21_event);

static ssize_t seq_23_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_23_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_23_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_23_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_23_event);

static ssize_t seq_31_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_31_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_31_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_31_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_31_event);

static ssize_t seq_32_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_32_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_32_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_32_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_32_event);

static ssize_t seq_13_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->seq_13_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_13_event_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->seq_13_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_13_event);
static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	if (!local_read(&drvdata->mode)) {
		val = config->seq_curr_state;
		goto out;
	}

	pm_runtime_get_sync(drvdata->dev);
	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	pm_runtime_put(drvdata->dev);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	config->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	if (val >= drvdata->nr_ctxid_cmp)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val = config->ctxid_vpid[config->ctxid_idx];
	spin_unlock(&drvdata->spinlock);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long vpid, pid;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &vpid);
	if (ret)
		return ret;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	config->ctxid_pid[config->ctxid_idx] = pid;
	config->ctxid_vpid[config->ctxid_idx] = vpid;
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->ctxid_mask;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t ctxid_mask_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->ctxid_mask = val;
	return size;
}
static DEVICE_ATTR_RW(ctxid_mask);
static ssize_t sync_freq_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->sync_freq;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t sync_freq_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->sync_freq = val & ETM_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(sync_freq);

static ssize_t timestamp_event_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	val = config->timestamp_event;
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t timestamp_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etm_config *config = &drvdata->config;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	config->timestamp_event = val & ETM_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(timestamp_event);
static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = etm_get_trace_id(drvdata);

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);
static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_traceid.attr,
	&dev_attr_cpu.attr,
	NULL,
};

#define coresight_etm3x_reg(name, offset)			\
	coresight_simple_reg32(struct etm_drvdata, name, offset)

coresight_etm3x_reg(etmccr, ETMCCR);
coresight_etm3x_reg(etmccer, ETMCCER);
coresight_etm3x_reg(etmscr, ETMSCR);
coresight_etm3x_reg(etmidr, ETMIDR);
coresight_etm3x_reg(etmcr, ETMCR);
coresight_etm3x_reg(etmtraceidr, ETMTRACEIDR);
coresight_etm3x_reg(etmteevr, ETMTEEVR);
coresight_etm3x_reg(etmtssvr, ETMTSSCR);
coresight_etm3x_reg(etmtecr1, ETMTECR1);
coresight_etm3x_reg(etmtecr2, ETMTECR2);

static struct attribute *coresight_etm_mgmt_attrs[] = {
	&dev_attr_etmccr.attr,
	&dev_attr_etmccer.attr,
	&dev_attr_etmscr.attr,
	&dev_attr_etmidr.attr,
	&dev_attr_etmcr.attr,
	&dev_attr_etmtraceidr.attr,
	&dev_attr_etmteevr.attr,
	&dev_attr_etmtssvr.attr,
	&dev_attr_etmtecr1.attr,
	&dev_attr_etmtecr2.attr,
	NULL,
};

static const struct attribute_group coresight_etm_group = {
	.attrs = coresight_etm_attrs,
};

static const struct attribute_group coresight_etm_mgmt_group = {
	.attrs = coresight_etm_mgmt_attrs,
	.name = "mgmt",
};

const struct attribute_group *coresight_etm_groups[] = {
	&coresight_etm_group,
	&coresight_etm_mgmt_group,
	NULL,
};