/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <drm/drmP.h>
#include <linux/debugfs.h>
#include "amdgpu.h"

/*
 * Debugfs
 */
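
/*
 * amdgpu_debugfs_add_files - register a block of drm_info_list entries
 *
 * Records @files in the per-device component table (returning early if the
 * same array was already registered) and, when debugfs is enabled, creates
 * the corresponding entries under the primary DRM minor's debugfs root.
 * Returns 0 on success, or -EINVAL once the component table is full.
 */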

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
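
/*
 * amdgpu_debugfs_process_reg_op - common read/write handler for amdgpu_regs
 *
 * The file offset encodes more than the MMIO address:
 *   bits  0..21  byte offset into the register BAR
 *   bit      23  take pm.mutex around the access (PG-protected registers)
 *   bit      62  apply GRBM banking: bits 24..33 = SE, 34..43 = SH,
 *                44..53 = instance (0x3FF selects broadcast)
 *   bit      61  apply SRBM selection: bits 24..33 = ME, 34..43 = pipe,
 *                44..53 = queue
 * Bits 61 and 62 are mutually exclusive.
 */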

static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = 0;

	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else if (*pos & (1ULL << 61)) {
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;

		use_ring = 1;
	} else {
		use_bank = use_ring = 0;
	}

	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (*pos > adev->rmmio_size)
			goto end;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				WREG32(*pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}

static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
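
/*
 * The amdgpu_regs_pcie, amdgpu_regs_didt and amdgpu_regs_smc files below all
 * follow the same pattern: the file offset selects the register, and each
 * 4-byte read or write is forwarded to the matching indirect accessor
 * (RREG32_PCIE/WREG32_PCIE, RREG32_DIDT/WREG32_DIDT, RREG32_SMC/WREG32_SMC).
 * Note that the PCIE and DIDT handlers pass a dword index (*pos >> 2) while
 * the SMC handlers pass the byte offset directly.
 */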

static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r)
			return r;

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
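
/*
 * amdgpu_debugfs_gca_config_read - dump the GFX configuration
 *
 * Returns a versioned array of 32-bit words describing the ASIC's graphics
 * configuration (shader engine counts, tiling parameters, IDs, ...). The
 * first word is a format version that is bumped whenever new fields are
 * appended, so userspace tools can tell which fields are present.
 */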

static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
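
/*
 * amdgpu_debugfs_sensor_read - read a power/thermal sensor via powerplay
 *
 * The file offset divided by four selects the sensor id passed to
 * amdgpu_dpm_read_sensor(); the result is copied out as one or more 32-bit
 * values. Fails with -EINVAL when dpm is disabled, the powerplay backend has
 * no read_sensor hook, or the requested size exceeds what the sensor reports.
 */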

static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (amdgpu_dpm == 0)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);
	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
	else
		return -EINVAL;

	if (size > valuesize)
		return -EINVAL;

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	return !r ? outsize : r;
}
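
/*
 * amdgpu_debugfs_wave_read - dump the status of a single wavefront
 *
 * The file offset selects which wave to inspect:
 *   bits  0..6   dword offset into the returned wave data
 *   bits  7..14  SE, bits 15..22 SH, bits 23..30 CU
 *   bits 31..36  wave id, bits 37..44 SIMD id
 * The GRBM index is switched to the requested SE/SH/CU while the per-ASIC
 * read_wave_data() callback fills the buffer.
 */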

static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (!x)
		return -EINVAL;

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
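
/*
 * amdgpu_debugfs_gpr_read - dump the SGPRs or VGPRs of a wavefront
 *
 * The file offset encodes the register window and the wave to target:
 *   bits  0..11  dword offset into the register file
 *   bits 12..19  SE, bits 20..27 SH, bits 28..35 CU
 *   bits 36..43  wave id, bits 44..51 SIMD id, bits 52..59 thread id
 *   bits 60..61  bank: 0 selects VGPRs (thread id applies), otherwise SGPRs
 * The registers are read through the per-ASIC read_wave_vgprs()/
 * read_wave_sgprs() callbacks while holding the GRBM index mutex.
 */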

static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = *pos & GENMASK_ULL(11, 0);
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
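
/*
 * amdgpu_debugfs_regs_init - create the raw register debugfs files
 *
 * Creates one file per entry in debugfs_regs_names[] under the primary DRM
 * minor's debugfs directory and remembers the dentries for cleanup. The
 * amdgpu_regs file additionally advertises the MMIO aperture size as its
 * i_size so tools can bound their reads.
 *
 * Illustrative usage only (a sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and the GPU is DRM minor 0):
 *
 *   # read one 32-bit register at byte offset REG from the MMIO BAR
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 count=1 skip=$((REG / 4)) | xxd
 */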

int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (IS_ERR(ent)) {
			/* unwind the entries created so far */
			for (j = 0; j < i; j++) {
				debugfs_remove(adev->debugfs_regs[j]);
				adev->debugfs_regs[j] = NULL;
			}
			return PTR_ERR(ent);
		}

		if (!i)
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		if (adev->debugfs_regs[i]) {
			debugfs_remove(adev->debugfs_regs[i]);
			adev->debugfs_regs[i] = NULL;
		}
	}
}
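
/*
 * amdgpu_debugfs_test_ib - run the IB ring tests on demand
 *
 * Parks every ring's scheduler thread so nothing else submits work, runs
 * amdgpu_ib_ring_tests(), reports the result through the seq_file and then
 * unparks the schedulers again.
 */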

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	return 0;
}
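
/*
 * The remaining entries are simple one-shot helpers: amdgpu_vbios dumps the
 * raw video BIOS image, while amdgpu_evict_vram and amdgpu_evict_gtt force
 * an eviction of the VRAM and GTT domains respectively and print the return
 * code.
 */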

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif