coresight-cpu-debug.c

/*
 * Copyright (c) 2017 Linaro Limited. All rights reserved.
 *
 * Author: Leo Yan <leo.yan@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include "coresight-priv.h"

#define EDPCSR                  0x0A0
#define EDCIDSR                 0x0A4
#define EDVIDSR                 0x0A8
#define EDPCSR_HI               0x0AC
#define EDOSLAR                 0x300
#define EDPRCR                  0x310
#define EDPRSR                  0x314
#define EDDEVID1                0xFC4
#define EDDEVID                 0xFC8

#define EDPCSR_PROHIBITED       0xFFFFFFFF

/* bits definition for EDPCSR */
#define EDPCSR_THUMB            BIT(0)
#define EDPCSR_ARM_INST_MASK    GENMASK(31, 2)
#define EDPCSR_THUMB_INST_MASK  GENMASK(31, 1)

/* bits definition for EDPRCR */
#define EDPRCR_COREPURQ         BIT(3)
#define EDPRCR_CORENPDRQ        BIT(0)

/* bits definition for EDPRSR */
#define EDPRSR_DLK              BIT(6)
#define EDPRSR_PU               BIT(0)

/* bits definition for EDVIDSR */
#define EDVIDSR_NS              BIT(31)
#define EDVIDSR_E2              BIT(30)
#define EDVIDSR_E3              BIT(29)
#define EDVIDSR_HV              BIT(28)
#define EDVIDSR_VMID            GENMASK(7, 0)

/*
 * bits definition for EDDEVID1:PCSROffset
 *
 * NOTE: armv8 and armv7 have different definitions for this register,
 * so consolidate the bits definition as below:
 *
 * 0b0000 - Sample offset applies based on the instruction state; we
 *          rely on EDDEVID to check if EDPCSR is implemented or not
 * 0b0001 - No offset applies.
 * 0b0010 - No offset applies, but do not use in AArch32 mode
 *
 */
#define EDDEVID1_PCSR_OFFSET_MASK               GENMASK(3, 0)
#define EDDEVID1_PCSR_OFFSET_INS_SET            (0x0)
#define EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32     (0x2)

/* bits definition for EDDEVID */
#define EDDEVID_PCSAMPLE_MODE           GENMASK(3, 0)
#define EDDEVID_IMPL_EDPCSR             (0x1)
#define EDDEVID_IMPL_EDPCSR_EDCIDSR     (0x2)
#define EDDEVID_IMPL_FULL               (0x3)

#define DEBUG_WAIT_SLEEP                1000
#define DEBUG_WAIT_TIMEOUT              32000

struct debug_drvdata {
        void __iomem    *base;
        struct device   *dev;
        int             cpu;

        bool            edpcsr_present;
        bool            edcidsr_present;
        bool            edvidsr_present;
        bool            pc_has_offset;

        u32             edpcsr;
        u32             edpcsr_hi;
        u32             edprsr;
        u32             edvidsr;
        u32             edcidsr;
};

static DEFINE_MUTEX(debug_lock);
static DEFINE_PER_CPU(struct debug_drvdata *, debug_drvdata);
static int debug_count;
static struct dentry *debug_debugfs_dir;

static bool debug_enable;
module_param_named(enable, debug_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Control to enable coresight CPU debug functionality");

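/*
 * Usage note (an editorial illustration, not part of the original driver
 * text; derived from the module parameter above and the debugfs knob
 * created in debug_func_init() below, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   - at boot time:  coresight_cpu_debug.enable=1 on the kernel command line
 *   - at runtime:    echo 1 > /sys/kernel/debug/coresight_cpu_debug/enable
 *
 * Enabling keeps the per-CPU debug power domains powered up so that the
 * panic notifier can sample and dump the program counter registers.
 */
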
static void debug_os_unlock(struct debug_drvdata *drvdata)
{
        /* Unlocks the debug registers */
        writel_relaxed(0x0, drvdata->base + EDOSLAR);

        /* Make sure the registers are unlocked before accessing */
        wmb();
}

/*
 * According to ARM DDI 0487A.k, before accessing the external debug
 * registers we must first check the access permission; if either of
 * the conditions below is met, the debug registers must not be
 * accessed, otherwise the system may lock up:
 *
 * - The CPU power domain is powered off;
 * - The OS Double Lock is locked;
 *
 * Reading EDPRSR tells us whether either condition applies.
 */
static bool debug_access_permitted(struct debug_drvdata *drvdata)
{
        /* CPU is powered off */
        if (!(drvdata->edprsr & EDPRSR_PU))
                return false;

        /* The OS Double Lock is locked */
        if (drvdata->edprsr & EDPRSR_DLK)
                return false;

        return true;
}

static void debug_force_cpu_powered_up(struct debug_drvdata *drvdata)
{
        u32 edprcr;

try_again:

        /*
         * Send a request to the power management controller by asserting
         * the DBGPWRUPREQ signal; a sane power management controller
         * implementation will then enable the CPU power domain if the
         * CPU is in a low power state.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        /* Wait for CPU to be powered up (timeout~=32ms) */
        if (readx_poll_timeout_atomic(readl_relaxed, drvdata->base + EDPRSR,
                        drvdata->edprsr, (drvdata->edprsr & EDPRSR_PU),
                        DEBUG_WAIT_SLEEP, DEBUG_WAIT_TIMEOUT)) {
                /*
                 * Unfortunately the CPU cannot be powered up, so bail out;
                 * the caller then has no permission to access the other
                 * registers. In this case, CPU low power states should be
                 * disabled to ensure the CPU power domain stays enabled!
                 */
                dev_err(drvdata->dev, "%s: power up request for CPU%d failed\n",
                        __func__, drvdata->cpu);
                return;
        }

        /*
         * At this point the CPU is powered up, so set the no powerdown
         * request bit so we don't lose power and emulate power down.
         */
        edprcr = readl_relaxed(drvdata->base + EDPRCR);
        edprcr |= EDPRCR_COREPURQ | EDPRCR_CORENPDRQ;
        writel_relaxed(edprcr, drvdata->base + EDPRCR);

        drvdata->edprsr = readl_relaxed(drvdata->base + EDPRSR);

        /* The core power domain got switched off on use, try again */
        if (unlikely(!(drvdata->edprsr & EDPRSR_PU)))
                goto try_again;
}

static void debug_read_regs(struct debug_drvdata *drvdata)
{
        u32 save_edprcr;

        CS_UNLOCK(drvdata->base);

        /* Unlock the OS lock */
        debug_os_unlock(drvdata);

        /* Save EDPRCR register */
        save_edprcr = readl_relaxed(drvdata->base + EDPRCR);

        /*
         * Ensure the CPU power domain is enabled so that the registers
         * are accessible.
         */
        debug_force_cpu_powered_up(drvdata);

        if (!debug_access_permitted(drvdata))
                goto out;

        drvdata->edpcsr = readl_relaxed(drvdata->base + EDPCSR);

        /*
         * As described in ARM DDI 0487A.k, if the processing
         * element (PE) is in debug state, or sample-based
         * profiling is prohibited, EDPCSR reads as 0xFFFFFFFF;
         * the EDCIDSR, EDVIDSR and EDPCSR_HI registers are then
         * in an UNKNOWN state, so bail out directly in this case.
         */
        if (drvdata->edpcsr == EDPCSR_PROHIBITED)
                goto out;

        /*
         * A read of EDPCSR normally has the side-effect of
         * indirectly writing to EDCIDSR, EDVIDSR and EDPCSR_HI;
         * at this point it's safe to read their values.
         */
        if (IS_ENABLED(CONFIG_64BIT))
                drvdata->edpcsr_hi = readl_relaxed(drvdata->base + EDPCSR_HI);

        if (drvdata->edcidsr_present)
                drvdata->edcidsr = readl_relaxed(drvdata->base + EDCIDSR);

        if (drvdata->edvidsr_present)
                drvdata->edvidsr = readl_relaxed(drvdata->base + EDVIDSR);

out:
        /* Restore EDPRCR register */
        writel_relaxed(save_edprcr, drvdata->base + EDPRCR);

        CS_LOCK(drvdata->base);
}

#ifdef CONFIG_64BIT
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        return (unsigned long)drvdata->edpcsr_hi << 32 |
               (unsigned long)drvdata->edpcsr;
}
#else
static unsigned long debug_adjust_pc(struct debug_drvdata *drvdata)
{
        unsigned long arm_inst_offset = 0, thumb_inst_offset = 0;
        unsigned long pc;

        pc = (unsigned long)drvdata->edpcsr;

        if (drvdata->pc_has_offset) {
                arm_inst_offset = 8;
                thumb_inst_offset = 4;
        }

        /* Handle thumb instruction */
        if (pc & EDPCSR_THUMB) {
                pc = (pc & EDPCSR_THUMB_INST_MASK) - thumb_inst_offset;
                return pc;
        }

        /*
         * Handle the arm instruction offset: if the arm instruction
         * address is not 4-byte aligned, the sample offset is likely
         * IMPLEMENTATION DEFINED; keep the original value in that case
         * and print a notice.
         */
        if (pc & BIT(1))
                dev_emerg(drvdata->dev,
                          "Instruction offset is implementation defined\n");
        else
                pc = (pc & EDPCSR_ARM_INST_MASK) - arm_inst_offset;

        return pc;
}
#endif

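/*
 * Illustrative example for the AArch32 path above (editorial, not part of
 * the original driver text): with pc_has_offset set and a raw EDPCSR value
 * of 0x00010021, bit 0 marks a Thumb sample, the Thumb mask clears it to
 * 0x00010020, and subtracting the 4-byte Thumb offset yields 0x0001001c as
 * the adjusted PC.
 */
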
static void debug_dump_regs(struct debug_drvdata *drvdata)
{
        struct device *dev = drvdata->dev;
        unsigned long pc;

        dev_emerg(dev, " EDPRSR: %08x (Power:%s DLK:%s)\n",
                  drvdata->edprsr,
                  drvdata->edprsr & EDPRSR_PU ? "On" : "Off",
                  drvdata->edprsr & EDPRSR_DLK ? "Lock" : "Unlock");

        if (!debug_access_permitted(drvdata)) {
                dev_emerg(dev, "No permission to access debug registers!\n");
                return;
        }

        if (drvdata->edpcsr == EDPCSR_PROHIBITED) {
                dev_emerg(dev, "CPU is in Debug state or profiling is prohibited!\n");
                return;
        }

        pc = debug_adjust_pc(drvdata);
        dev_emerg(dev, " EDPCSR: [<%p>] %pS\n", (void *)pc, (void *)pc);

        if (drvdata->edcidsr_present)
                dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);

        if (drvdata->edvidsr_present)
                dev_emerg(dev, " EDVIDSR: %08x (State:%s Mode:%s Width:%dbits VMID:%x)\n",
                          drvdata->edvidsr,
                          drvdata->edvidsr & EDVIDSR_NS ?
                          "Non-secure" : "Secure",
                          drvdata->edvidsr & EDVIDSR_E3 ? "EL3" :
                                (drvdata->edvidsr & EDVIDSR_E2 ?
                                 "EL2" : "EL1/0"),
                          drvdata->edvidsr & EDVIDSR_HV ? 64 : 32,
                          drvdata->edvidsr & (u32)EDVIDSR_VMID);
}

static void debug_init_arch_data(void *info)
{
        struct debug_drvdata *drvdata = info;
        u32 mode, pcsr_offset;
        u32 eddevid, eddevid1;

        CS_UNLOCK(drvdata->base);

        /* Read device info */
        eddevid = readl_relaxed(drvdata->base + EDDEVID);
        eddevid1 = readl_relaxed(drvdata->base + EDDEVID1);

        CS_LOCK(drvdata->base);

        /* Parse implementation feature */
        mode = eddevid & EDDEVID_PCSAMPLE_MODE;
        pcsr_offset = eddevid1 & EDDEVID1_PCSR_OFFSET_MASK;

        drvdata->edpcsr_present = false;
        drvdata->edcidsr_present = false;
        drvdata->edvidsr_present = false;
        drvdata->pc_has_offset = false;

        switch (mode) {
        case EDDEVID_IMPL_FULL:
                drvdata->edvidsr_present = true;
                /* Fall through */
        case EDDEVID_IMPL_EDPCSR_EDCIDSR:
                drvdata->edcidsr_present = true;
                /* Fall through */
        case EDDEVID_IMPL_EDPCSR:
                /*
                 * In ARM DDI 0487A.k, EDDEVID1.PCSROffset defines whether
                 * an offset applies to the PC sampling value; if
                 * EDDEVID1.PCSROffset reads back as 0x2, the debug module
                 * does not sample the instruction set state when an armv8
                 * CPU is in AArch32 state.
                 */
                drvdata->edpcsr_present =
                        ((IS_ENABLED(CONFIG_64BIT) && pcsr_offset != 0) ||
                         (pcsr_offset != EDDEVID1_PCSR_NO_OFFSET_DIS_AARCH32));

                drvdata->pc_has_offset =
                        (pcsr_offset == EDDEVID1_PCSR_OFFSET_INS_SET);
                break;
        default:
                break;
        }
}

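/*
 * Worked example for the decode above (editorial, based solely on the
 * register masks defined in this file): eddevid = 0x3 and eddevid1 = 0x0
 * select EDDEVID_IMPL_FULL with EDDEVID1_PCSR_OFFSET_INS_SET, so
 * edpcsr_present, edcidsr_present, edvidsr_present and pc_has_offset all
 * end up true.
 */
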
/*
 * Dump out information on panic.
 */
static int debug_notifier_call(struct notifier_block *self,
                               unsigned long v, void *p)
{
        int cpu;
        struct debug_drvdata *drvdata;

        mutex_lock(&debug_lock);

        /* Bail out if the functionality is disabled */
        if (!debug_enable)
                goto skip_dump;

        pr_emerg("ARM external debug module:\n");

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                dev_emerg(drvdata->dev, "CPU[%d]:\n", drvdata->cpu);

                debug_read_regs(drvdata);
                debug_dump_regs(drvdata);
        }

skip_dump:
        mutex_unlock(&debug_lock);
        return 0;
}

static struct notifier_block debug_notifier = {
        .notifier_call = debug_notifier_call,
};

static int debug_enable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret = 0;
        cpumask_t mask;

        /*
         * Use a cpumask to track which debug power domains have been
         * powered on, so that failure can be handled by rolling them back.
         */
        cpumask_clear(&mask);

        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_get_sync(drvdata->dev);
                if (ret < 0)
                        goto err;
                else
                        cpumask_set_cpu(cpu, &mask);
        }

        return 0;

err:
        /*
         * If pm_runtime_get_sync() has failed, roll back all the other
         * CPUs that were enabled before the failure.
         */
        for_each_cpu(cpu, &mask) {
                drvdata = per_cpu(debug_drvdata, cpu);
                pm_runtime_put_noidle(drvdata->dev);
        }

        return ret;
}

static int debug_disable_func(void)
{
        struct debug_drvdata *drvdata;
        int cpu, ret, err = 0;

        /*
         * Disable the debug power domains; record any error but keep
         * iterating over the remaining CPUs when an error is encountered.
         */
        for_each_possible_cpu(cpu) {
                drvdata = per_cpu(debug_drvdata, cpu);
                if (!drvdata)
                        continue;

                ret = pm_runtime_put(drvdata->dev);
                if (ret < 0)
                        err = ret;
        }

        return err;
}

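/*
 * Editorial note on the debugfs knob implemented below (not part of the
 * original driver text): writes are parsed by kstrtou8_from_user() with
 * base 2, so the expected inputs are the strings "0" and "1"; reads report
 * the current debug_enable state followed by a newline.
 */
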
static ssize_t debug_func_knob_write(struct file *f,
                const char __user *buf, size_t count, loff_t *ppos)
{
        u8 val;
        int ret;

        ret = kstrtou8_from_user(buf, count, 2, &val);
        if (ret)
                return ret;

        mutex_lock(&debug_lock);

        if (val == debug_enable)
                goto out;

        if (val)
                ret = debug_enable_func();
        else
                ret = debug_disable_func();

        if (ret) {
                pr_err("%s: unable to %s debug function: %d\n",
                       __func__, val ? "enable" : "disable", ret);
                goto err;
        }

        debug_enable = val;

out:
        ret = count;

err:
        mutex_unlock(&debug_lock);
        return ret;
}

static ssize_t debug_func_knob_read(struct file *f,
                char __user *ubuf, size_t count, loff_t *ppos)
{
        ssize_t ret;
        char buf[3];

        mutex_lock(&debug_lock);
        snprintf(buf, sizeof(buf), "%d\n", debug_enable);
        mutex_unlock(&debug_lock);

        ret = simple_read_from_buffer(ubuf, count, ppos, buf, sizeof(buf));
        return ret;
}

static const struct file_operations debug_func_knob_fops = {
        .open   = simple_open,
        .read   = debug_func_knob_read,
        .write  = debug_func_knob_write,
};

static int debug_func_init(void)
{
        struct dentry *file;
        int ret;

        /* Create debugfs node */
        debug_debugfs_dir = debugfs_create_dir("coresight_cpu_debug", NULL);
        if (!debug_debugfs_dir) {
                pr_err("%s: unable to create debugfs directory\n", __func__);
                return -ENOMEM;
        }

        file = debugfs_create_file("enable", 0644, debug_debugfs_dir, NULL,
                                   &debug_func_knob_fops);
        if (!file) {
                pr_err("%s: unable to create enable knob file\n", __func__);
                ret = -ENOMEM;
                goto err;
        }

        /* Register function to be called for panic */
        ret = atomic_notifier_chain_register(&panic_notifier_list,
                                             &debug_notifier);
        if (ret) {
                pr_err("%s: unable to register notifier: %d\n",
                       __func__, ret);
                goto err;
        }

        return 0;

err:
        debugfs_remove_recursive(debug_debugfs_dir);
        return ret;
}

static void debug_func_exit(void)
{
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &debug_notifier);
        debugfs_remove_recursive(debug_debugfs_dir);
}

static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
        void __iomem *base;
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata;
        struct resource *res = &adev->res;
        struct device_node *np = adev->dev.of_node;
        int ret;

        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->cpu = np ? of_coresight_get_cpu(np) : 0;
        if (per_cpu(debug_drvdata, drvdata->cpu)) {
                dev_err(dev, "CPU%d drvdata has already been initialized\n",
                        drvdata->cpu);
                return -EBUSY;
        }

        drvdata->dev = &adev->dev;
        amba_set_drvdata(adev, drvdata);

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        drvdata->base = base;

        get_online_cpus();
        per_cpu(debug_drvdata, drvdata->cpu) = drvdata;
        ret = smp_call_function_single(drvdata->cpu, debug_init_arch_data,
                                       drvdata, 1);
        put_online_cpus();

        if (ret) {
                dev_err(dev, "CPU%d debug arch init failed\n", drvdata->cpu);
                goto err;
        }

        if (!drvdata->edpcsr_present) {
                dev_err(dev, "CPU%d sample-based profiling isn't implemented\n",
                        drvdata->cpu);
                ret = -ENXIO;
                goto err;
        }

        if (!debug_count++) {
                ret = debug_func_init();
                if (ret)
                        goto err_func_init;
        }

        mutex_lock(&debug_lock);
        /* Turn off debug power domain if debugging is disabled */
        if (!debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        dev_info(dev, "Coresight debug-CPU%d initialized\n", drvdata->cpu);
        return 0;

err_func_init:
        debug_count--;
err:
        per_cpu(debug_drvdata, drvdata->cpu) = NULL;
        return ret;
}

static int debug_remove(struct amba_device *adev)
{
        struct device *dev = &adev->dev;
        struct debug_drvdata *drvdata = amba_get_drvdata(adev);

        per_cpu(debug_drvdata, drvdata->cpu) = NULL;

        mutex_lock(&debug_lock);
        /* Turn off debug power domain before removing the module */
        if (debug_enable)
                pm_runtime_put(dev);
        mutex_unlock(&debug_lock);

        if (!--debug_count)
                debug_func_exit();

        return 0;
}

static const struct amba_id debug_ids[] = {
        {       /* Debug for Cortex-A53 */
                .id     = 0x000bbd03,
                .mask   = 0x000fffff,
        },
        {       /* Debug for Cortex-A57 */
                .id     = 0x000bbd07,
                .mask   = 0x000fffff,
        },
        {       /* Debug for Cortex-A72 */
                .id     = 0x000bbd08,
                .mask   = 0x000fffff,
        },
        { 0, 0 },
};

static struct amba_driver debug_driver = {
        .drv = {
                .name   = "coresight-cpu-debug",
                .suppress_bind_attrs = true,
        },
        .probe          = debug_probe,
        .remove         = debug_remove,
        .id_table       = debug_ids,
};

module_amba_driver(debug_driver);

MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("ARM Coresight CPU Debug Driver");
MODULE_LICENSE("GPL");