/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
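
/*
 * Illustrative sketch only, not part of the original driver: this
 * hypothetical helper shows the basic sDMA packet emission pattern used
 * throughout this file (e.g. by cik_sdma_ring_test_ring() below), where a
 * linear WRITE packet carries a header, a 64-bit destination address, a
 * DW count, and the inline payload.
 */
static inline void cik_sdma_example_emit_write(struct amdgpu_ring *ring,
					       u64 dst_addr, u32 payload)
{
	/* packet header: opcode, sub-opcode, extra bits */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE,
					    SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(dst_addr));
	amdgpu_ring_write(ring, upper_32_bits(dst_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, payload);
}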
/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma[i].fw);
	}
out:
	if (err) {
		printk(KERN_ERR
		       "cik_sdma: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
			release_firmware(adev->sdma[i].fw);
			adev->sdma[i].fw = NULL;
		}
	}
	return err;
}
/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}
/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	/* account for the NOP padding below and the 4 dword IB packet
	 * when computing the ring position past this submission
	 */
	while ((next_rptr & 7) != 4)
		next_rptr++;
	next_rptr += 4;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on an 8 DW boundary */
	while ((ring->wptr & 7) != 4)
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
/**
 * cik_sdma_ring_emit_semaphore - emit a semaphore on the dma ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @semaphore: amdgpu semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
	amdgpu_ring_write(ring, addr & 0xfffffff8);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);

	return true;
}
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}

/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		ring = &adev->sdma[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
		return -EINVAL;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma[i].feature_version >= 20)
			adev->sdma[i].burst_nop = true;
		fw_data = (const __le32 *)
			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
	}

	return 0;
}
/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* unhalt the MEs */
	cik_sdma_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_lock(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}
/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}
/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & AMDGPU_PTE_SYSTEM) {
				value = amdgpu_vm_map_gart(ib->ring->adev, addr);
				value &= 0xFFFFFFFFFFFFF000ULL;
			} else if (flags & AMDGPU_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
				    uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}
/**
 * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords (CIK).
 */
static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
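
/*
 * Worked example (added for clarity): an IB whose length_dw is 5 gets
 * three NOP packets appended by cik_sdma_vm_pad_ib() above, bringing it
 * to the required multiple of 8 dwords; an IB that is already a multiple
 * of 8 is left untouched.
 */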
/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VMID to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}
static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
	if (r)
		return r;

	ring = &adev->sdma[0].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[1].ring;
	ring->ring_obj = NULL;

	ring = &adev->sdma[0].ring;
	sprintf(ring->name, "sdma0");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	ring = &adev->sdma[1].ring;
	sprintf(ring->name, "sdma1");
	r = amdgpu_ring_init(adev, ring, 256 * 1024,
			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
			     AMDGPU_RING_TYPE_SDMA);
	if (r)
		return r;

	return r;
}
static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_ring_fini(&adev->sdma[0].ring);
	amdgpu_ring_fini(&adev->sdma[1].ring);

	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void cik_sdma_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
				 RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA0_GFX_APE1_CNTL=0x%08X\n",
				 RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}
static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		cik_sdma_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		cik_sdma_print_status((void *)adev);
	}

	return 0;
}
static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}

	return 0;
}

static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.print_status = cik_sdma_print_status,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};
/**
 * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
{
	if (cik_sdma_is_idle(ring->adev)) {
		amdgpu_ring_lockup_update(ring);
		return false;
	}
	return amdgpu_ring_test_lockup(ring);
}
static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_semaphore = cik_sdma_ring_emit_semaphore,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.is_lockup = cik_sdma_ring_is_lockup,
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}
/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
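
/*
 * Note (added for clarity): the copy packet above is 7 dwords and the
 * fill packet below is 5 dwords, which is why copy_num_dw and fill_num_dw
 * in cik_sdma_buffer_funcs further down are 7 and 5 respectively.
 */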
/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};

static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
	.pad_ib = cik_sdma_vm_pad_ib,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}