/* amdgpu_debugfs.c */
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <drm/drmP.h>
#include <linux/debugfs.h>
#include "amdgpu.h"

/*
 * Debugfs
 */
  32. int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
  33. const struct drm_info_list *files,
  34. unsigned nfiles)
  35. {
  36. unsigned i;
  37. for (i = 0; i < adev->debugfs_count; i++) {
  38. if (adev->debugfs[i].files == files) {
  39. /* Already registered */
  40. return 0;
  41. }
  42. }
  43. i = adev->debugfs_count + 1;
  44. if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
  45. DRM_ERROR("Reached maximum number of debugfs components.\n");
  46. DRM_ERROR("Report so we increase "
  47. "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
  48. return -EINVAL;
  49. }
  50. adev->debugfs[adev->debugfs_count].files = files;
  51. adev->debugfs[adev->debugfs_count].num_files = nfiles;
  52. adev->debugfs_count = i;
  53. #if defined(CONFIG_DEBUG_FS)
  54. drm_debugfs_create_files(files, nfiles,
  55. adev->ddev->primary->debugfs_root,
  56. adev->ddev->primary);
  57. #endif
  58. return 0;
  59. }
#if defined(CONFIG_DEBUG_FS)
/*
 * amdgpu_debugfs_regs_read - debugfs read callback for raw MMIO registers.
 *
 * The file offset encodes more than the register address:
 *   bit  62    : SE/SH/instance bank selection is encoded in the offset
 *   bits 24-33 : SE bank       (0x3FF selects broadcast / "all")
 *   bits 34-43 : SH bank       (0x3FF selects broadcast / "all")
 *   bits 44-53 : instance bank (0x3FF selects broadcast / "all")
 *   bit  23    : take the PM power-gating mutex around the access
 *   bits 0-21  : byte offset of the register within the MMIO BAR
 *
 * Returns the number of bytes copied to userspace, or a negative errno.
 */
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank;
	unsigned instance_bank, sh_bank, se_bank;

	/* only naturally aligned, whole-dword accesses are supported */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		/* 0x3FF in a bank field means "broadcast to all" */
		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = 1;
	} else {
		use_bank = 0;
	}

	/* strip the control bits, leaving only the register byte offset */
	*pos &= (1UL << 22) - 1;

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
			return -EINVAL;
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		/*
		 * NOTE(review): '>' still permits a 4-byte read starting
		 * exactly at rmmio_size; looks like this should be '>=' —
		 * confirm against the register BAR bounds.
		 */
		if (*pos > adev->rmmio_size)
			goto end;

		value = RREG32(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	/* restore broadcast indexing and release any locks taken above */
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	return result;
}
  122. static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
  123. size_t size, loff_t *pos)
  124. {
  125. struct amdgpu_device *adev = file_inode(f)->i_private;
  126. ssize_t result = 0;
  127. int r;
  128. bool pm_pg_lock, use_bank;
  129. unsigned instance_bank, sh_bank, se_bank;
  130. if (size & 0x3 || *pos & 0x3)
  131. return -EINVAL;
  132. /* are we reading registers for which a PG lock is necessary? */
  133. pm_pg_lock = (*pos >> 23) & 1;
  134. if (*pos & (1ULL << 62)) {
  135. se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
  136. sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
  137. instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
  138. if (se_bank == 0x3FF)
  139. se_bank = 0xFFFFFFFF;
  140. if (sh_bank == 0x3FF)
  141. sh_bank = 0xFFFFFFFF;
  142. if (instance_bank == 0x3FF)
  143. instance_bank = 0xFFFFFFFF;
  144. use_bank = 1;
  145. } else {
  146. use_bank = 0;
  147. }
  148. *pos &= (1UL << 22) - 1;
  149. if (use_bank) {
  150. if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
  151. (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
  152. return -EINVAL;
  153. mutex_lock(&adev->grbm_idx_mutex);
  154. amdgpu_gfx_select_se_sh(adev, se_bank,
  155. sh_bank, instance_bank);
  156. }
  157. if (pm_pg_lock)
  158. mutex_lock(&adev->pm.mutex);
  159. while (size) {
  160. uint32_t value;
  161. if (*pos > adev->rmmio_size)
  162. return result;
  163. r = get_user(value, (uint32_t *)buf);
  164. if (r)
  165. return r;
  166. WREG32(*pos >> 2, value);
  167. result += 4;
  168. buf += 4;
  169. *pos += 4;
  170. size -= 4;
  171. }
  172. if (use_bank) {
  173. amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  174. mutex_unlock(&adev->grbm_idx_mutex);
  175. }
  176. if (pm_pg_lock)
  177. mutex_unlock(&adev->pm.mutex);
  178. return result;
  179. }
  180. static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
  181. size_t size, loff_t *pos)
  182. {
  183. struct amdgpu_device *adev = file_inode(f)->i_private;
  184. ssize_t result = 0;
  185. int r;
  186. if (size & 0x3 || *pos & 0x3)
  187. return -EINVAL;
  188. while (size) {
  189. uint32_t value;
  190. value = RREG32_PCIE(*pos >> 2);
  191. r = put_user(value, (uint32_t *)buf);
  192. if (r)
  193. return r;
  194. result += 4;
  195. buf += 4;
  196. *pos += 4;
  197. size -= 4;
  198. }
  199. return result;
  200. }
  201. static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
  202. size_t size, loff_t *pos)
  203. {
  204. struct amdgpu_device *adev = file_inode(f)->i_private;
  205. ssize_t result = 0;
  206. int r;
  207. if (size & 0x3 || *pos & 0x3)
  208. return -EINVAL;
  209. while (size) {
  210. uint32_t value;
  211. r = get_user(value, (uint32_t *)buf);
  212. if (r)
  213. return r;
  214. WREG32_PCIE(*pos >> 2, value);
  215. result += 4;
  216. buf += 4;
  217. *pos += 4;
  218. size -= 4;
  219. }
  220. return result;
  221. }
  222. static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
  223. size_t size, loff_t *pos)
  224. {
  225. struct amdgpu_device *adev = file_inode(f)->i_private;
  226. ssize_t result = 0;
  227. int r;
  228. if (size & 0x3 || *pos & 0x3)
  229. return -EINVAL;
  230. while (size) {
  231. uint32_t value;
  232. value = RREG32_DIDT(*pos >> 2);
  233. r = put_user(value, (uint32_t *)buf);
  234. if (r)
  235. return r;
  236. result += 4;
  237. buf += 4;
  238. *pos += 4;
  239. size -= 4;
  240. }
  241. return result;
  242. }
  243. static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
  244. size_t size, loff_t *pos)
  245. {
  246. struct amdgpu_device *adev = file_inode(f)->i_private;
  247. ssize_t result = 0;
  248. int r;
  249. if (size & 0x3 || *pos & 0x3)
  250. return -EINVAL;
  251. while (size) {
  252. uint32_t value;
  253. r = get_user(value, (uint32_t *)buf);
  254. if (r)
  255. return r;
  256. WREG32_DIDT(*pos >> 2, value);
  257. result += 4;
  258. buf += 4;
  259. *pos += 4;
  260. size -= 4;
  261. }
  262. return result;
  263. }
  264. static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
  265. size_t size, loff_t *pos)
  266. {
  267. struct amdgpu_device *adev = file_inode(f)->i_private;
  268. ssize_t result = 0;
  269. int r;
  270. if (size & 0x3 || *pos & 0x3)
  271. return -EINVAL;
  272. while (size) {
  273. uint32_t value;
  274. value = RREG32_SMC(*pos);
  275. r = put_user(value, (uint32_t *)buf);
  276. if (r)
  277. return r;
  278. result += 4;
  279. buf += 4;
  280. *pos += 4;
  281. size -= 4;
  282. }
  283. return result;
  284. }
  285. static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
  286. size_t size, loff_t *pos)
  287. {
  288. struct amdgpu_device *adev = file_inode(f)->i_private;
  289. ssize_t result = 0;
  290. int r;
  291. if (size & 0x3 || *pos & 0x3)
  292. return -EINVAL;
  293. while (size) {
  294. uint32_t value;
  295. r = get_user(value, (uint32_t *)buf);
  296. if (r)
  297. return r;
  298. WREG32_SMC(*pos, value);
  299. result += 4;
  300. buf += 4;
  301. *pos += 4;
  302. size -= 4;
  303. }
  304. return result;
  305. }
/*
 * amdgpu_debugfs_gca_config_read - dump a versioned snapshot of the GCA
 * configuration (shader-engine counts, tiling parameters, PCI IDs, ...)
 * to userspace as an array of 32-bit words.
 *
 * The first word is a format version; it is bumped every time fields are
 * appended so userspace tools can tell which fields are present.
 */
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	/* only aligned, whole-dword transfers are supported */
	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	/* 256 words leaves ample headroom over the ~35 emitted below */
	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	/* copy out, clamped to the end of the snapshot */
	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
  371. static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
  372. size_t size, loff_t *pos)
  373. {
  374. struct amdgpu_device *adev = file_inode(f)->i_private;
  375. int idx, x, outsize, r, valuesize;
  376. uint32_t values[16];
  377. if (size & 3 || *pos & 0x3)
  378. return -EINVAL;
  379. if (amdgpu_dpm == 0)
  380. return -EINVAL;
  381. /* convert offset to sensor number */
  382. idx = *pos >> 2;
  383. valuesize = sizeof(values);
  384. if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
  385. r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
  386. else
  387. return -EINVAL;
  388. if (size > valuesize)
  389. return -EINVAL;
  390. outsize = 0;
  391. x = 0;
  392. if (!r) {
  393. while (size) {
  394. r = put_user(values[x++], (int32_t *)buf);
  395. buf += 4;
  396. size -= 4;
  397. outsize += 4;
  398. }
  399. }
  400. return !r ? outsize : r;
  401. }
/*
 * amdgpu_debugfs_wave_read - read status data for a single wavefront.
 *
 * The file offset encodes the target wave:
 *   bits 0-6   : byte offset into the returned status data
 *   bits 7-14  : SE index
 *   bits 15-22 : SH index
 *   bits 23-30 : CU index
 *   bits 31-36 : wave id
 *   bits 37-44 : SIMD id
 *
 * Returns the number of bytes copied, or -EINVAL if the GFX backend
 * produced no data for this wave.
 */
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result=0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	/* only aligned, whole-dword transfers are supported */
	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	/*
	 * x receives the number of dwords written into data[].
	 * NOTE(review): data[] holds 32 dwords; assumes read_wave_data()
	 * writes at most 32 entries — confirm against the gfx backend.
	 */
	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	/* restore broadcast indexing before releasing the mutex */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* no backend data means this wave cannot be inspected */
	if (!x)
		return -EINVAL;

	/* copy out, clamped to the amount of data actually captured */
	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	return result;
}
/*
 * amdgpu_debugfs_gpr_read - read the GPRs (vector or scalar registers)
 * of a single wavefront.
 *
 * The file offset encodes the target:
 *   bits 0-11  : offset into the register file (masked from the byte pos;
 *                NOTE(review): it is then used directly as a dword index
 *                into data[] below — confirm the intended units)
 *   bits 12-19 : SE, bits 20-27: SH, bits 28-35: CU
 *   bits 36-43 : wave id, bits 44-51: SIMD id
 *   bits 52-59 : thread id (VGPR reads only)
 *   bits 60-61 : bank (0 = VGPRs, otherwise SGPRs)
 *
 * NOTE(review): data[] holds 1024 dwords but the copy loop below is
 * bounded only by @size — a request larger than 4 KiB reads past the
 * allocation.  Verify against the upstream bounds-check fix.
 */
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	/* only aligned, whole-dword transfers are supported */
	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = *pos & GENMASK_ULL(11, 0);
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	/* bank 0 selects vector GPRs, anything else selects scalar GPRs */
	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	/* restore broadcast indexing before releasing the mutex */
	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	while (size) {
		uint32_t value;

		value = data[offset++];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	return result;
}
/* Raw MMIO register access (with SE/SH/instance bank encoding). */
static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};

/* DIDT indirect register access. */
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};

/* PCIE indirect register access. */
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};

/* SMC register access. */
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

/* Read-only GCA configuration snapshot. */
static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

/* Read-only DPM sensor values. */
static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

/* Read-only wavefront status dump. */
static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};

/* Read-only wavefront GPR dump. */
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

/*
 * Tables driving amdgpu_debugfs_regs_init(); debugfs_regs[i] is created
 * under the name debugfs_regs_names[i] — keep the two in sync.
 */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
};
  554. int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
  555. {
  556. struct drm_minor *minor = adev->ddev->primary;
  557. struct dentry *ent, *root = minor->debugfs_root;
  558. unsigned i, j;
  559. for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
  560. ent = debugfs_create_file(debugfs_regs_names[i],
  561. S_IFREG | S_IRUGO, root,
  562. adev, debugfs_regs[i]);
  563. if (IS_ERR(ent)) {
  564. for (j = 0; j < i; j++) {
  565. debugfs_remove(adev->debugfs_regs[i]);
  566. adev->debugfs_regs[i] = NULL;
  567. }
  568. return PTR_ERR(ent);
  569. }
  570. if (!i)
  571. i_size_write(ent->d_inode, adev->rmmio_size);
  572. adev->debugfs_regs[i] = ent;
  573. }
  574. return 0;
  575. }
  576. void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
  577. {
  578. unsigned i;
  579. for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
  580. if (adev->debugfs_regs[i]) {
  581. debugfs_remove(adev->debugfs_regs[i]);
  582. adev->debugfs_regs[i] = NULL;
  583. }
  584. }
  585. }
  586. static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
  587. {
  588. struct drm_info_node *node = (struct drm_info_node *) m->private;
  589. struct drm_device *dev = node->minor->dev;
  590. struct amdgpu_device *adev = dev->dev_private;
  591. int r = 0, i;
  592. /* hold on the scheduler */
  593. for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
  594. struct amdgpu_ring *ring = adev->rings[i];
  595. if (!ring || !ring->sched.thread)
  596. continue;
  597. kthread_park(ring->sched.thread);
  598. }
  599. seq_printf(m, "run ib test:\n");
  600. r = amdgpu_ib_ring_tests(adev);
  601. if (r)
  602. seq_printf(m, "ib ring tests failed (%d).\n", r);
  603. else
  604. seq_printf(m, "ib ring tests passed.\n");
  605. /* go on the scheduler */
  606. for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
  607. struct amdgpu_ring *ring = adev->rings[i];
  608. if (!ring || !ring->sched.thread)
  609. continue;
  610. kthread_unpark(ring->sched.thread);
  611. }
  612. return 0;
  613. }
  614. static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
  615. {
  616. struct drm_info_node *node = (struct drm_info_node *) m->private;
  617. struct drm_device *dev = node->minor->dev;
  618. struct amdgpu_device *adev = dev->dev_private;
  619. seq_write(m, adev->bios, adev->bios_size);
  620. return 0;
  621. }
  622. static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
  623. {
  624. struct drm_info_node *node = (struct drm_info_node *)m->private;
  625. struct drm_device *dev = node->minor->dev;
  626. struct amdgpu_device *adev = dev->dev_private;
  627. seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
  628. return 0;
  629. }
  630. static const struct drm_info_list amdgpu_debugfs_list[] = {
  631. {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
  632. {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
  633. {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
  634. };
/* Register the static amdgpu debugfs info files with the DRM core. */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}
#else
/* Stubs used when the kernel is built without CONFIG_DEBUG_FS. */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif