/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "cikd.h"
#include "cik.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
static int cik_sdma_soft_reset(void *handle);

MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
MODULE_FIRMWARE("radeon/kabini_sdma.bin");
MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
MODULE_FIRMWARE("radeon/mullins_sdma.bin");
MODULE_FIRMWARE("radeon/mullins_sdma1.bin");

u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);

static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */
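/*
 * Illustration (added commentary, not upstream documentation): the
 * simplest sDMA packet used in this file is the linear write emitted by
 * cik_sdma_ring_test_ring() below:
 *   DW0:  SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)
 *   DW1:  lower 32 bits of the destination address
 *   DW2:  upper 32 bits of the destination address
 *   DW3:  number of data DWs that follow
 *   DW4+: the data itself
 */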
/**
 * cik_sdma_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
	}
out:
	if (err) {
		pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
/**
 * cik_sdma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	rptr = ring->adev->wb.wb[ring->rptr_offs];

	return (rptr & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (CIK+).
 */
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

/**
 * cik_sdma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (CIK+).
 */
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
}
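/*
 * Note (added commentary): the hardware RB_RPTR/RB_WPTR registers hold
 * byte offsets into the ring, while the amdgpu ring code tracks pointers
 * in DWs; hence the << 2 / >> 2 conversions and the 0x3fffc mask
 * (DW aligned, bounded by the maximum ring size) in the three helpers
 * above.
 */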
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
					  SDMA_NOP_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
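/*
 * Note (added commentary): with burst-NOP capable firmware
 * (feature_version >= 20, see cik_sdma_load_microcode()), the first NOP
 * packet carries SDMA_NOP_COUNT(count - 1) and the engine consumes the
 * remaining count - 1 DWs as part of the same packet; older firmware
 * needs one NOP packet per padding DW.
 */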
/**
 * cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib,
				  unsigned vm_id, bool ctx_switch)
{
	u32 extra_bits = vm_id & 0xf;

	/* IB packet must end on an 8 DW boundary */
	cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
	amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
	amdgpu_ring_write(ring, ib->length_dw);
}
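/*
 * Note (added commentary): the INDIRECT_BUFFER packet is 4 DWs (header,
 * base lo, base hi, length in DWs); together with up to 7 DWs of NOP
 * alignment padding this accounts for .emit_ib_size = 7 + 4 in
 * cik_sdma_ring_funcs below.
 */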
/**
 * cik_sdma_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
	u32 ref_and_mask;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	else
		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}

static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmHDP_DEBUG0);
	amdgpu_ring_write(ring, 1);
}
/**
 * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: fence flags (e.g. AMDGPU_FENCE_FLAG_64BIT)
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
}
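/*
 * Note (added commentary): a fence is at most 9 DWs: two 4-DW FENCE
 * packets (low and, for 64-bit fences, high half of the sequence number)
 * plus the 1-DW TRAP, which is where the "9 + 9 + 9" fence term in
 * .emit_frame_size below comes from.
 */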
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * cik_sdma_enable - halt or unhalt the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
{
	u32 me_cntl;
	int i;

	if (!enable) {
		cik_sdma_gfx_stop(adev);
		cik_sdma_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
		else
			me_cntl |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], me_cntl);
	}
}
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			cik_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
			/* XXX SDMA RLC - todo */
		}
		cik_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK |
			SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK;
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);

		/* enable DMA RB */
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
		       rb_cntl | SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK);

		ib_cntl = SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK;
#ifdef __BIG_ENDIAN
		ib_cntl |= SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK;
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;
	}

	cik_sdma_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}
/**
 * cik_sdma_rlc_resume - setup and start the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	cik_sdma_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}
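/*
 * Note (added commentary): the F32 microcode is streamed through the
 * indexed SDMA0_UCODE_ADDR/SDMA0_UCODE_DATA register pair: the address
 * is reset to 0, ucode_size_bytes / 4 DWs are written one at a time, and
 * the firmware version is left in the address register afterwards.
 */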
/**
 * cik_sdma_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_start(struct amdgpu_device *adev)
{
	int r;

	r = cik_sdma_load_microcode(adev);
	if (r)
		return r;

	/* halt the engine before programming */
	cik_sdma_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = cik_sdma_gfx_resume(adev);
	if (r)
		return r;
	r = cik_sdma_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
/**
 * cik_sdma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, 1); /* number of DWs to follow */
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}
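/*
 * Note (added commentary): the test seeds a CPU-visible writeback slot
 * with 0xCAFEDEAD, asks the engine to overwrite it with 0xDEADBEEF via
 * the 5-DW linear write packet, and busy-polls the slot, so engine
 * liveness is checked without any further register reads.
 */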
/**
 * cik_sdma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
				SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = 1;
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}
/**
 * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
				 uint64_t pe, uint64_t src,
				 unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
/**
 * cik_sdma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: value to write into the page entry
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				  uint64_t value, unsigned count,
				  uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
		SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}
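/*
 * Worked example (added commentary): count = 3, value = 0x100000,
 * incr = 0x1000 gives ndw = 6 payload DWs after the 4-DW WRITE header:
 * the 64-bit entries 0x100000, 0x101000 and 0x102000, one low/high DW
 * pair per PTE.
 */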
/**
 * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint32_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = flags; /* mask */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}
/**
 * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 DWs.
 */
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0) |
				SDMA_NOP_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
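/*
 * Worked example (added commentary): an IB of 13 DWs gets
 * pad_count = (8 - (13 & 7)) % 8 = 3 NOP DWs, bringing length_dw to 16,
 * a multiple of 8 as the engine requires.
 */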
/**
 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
					    SDMA_POLL_REG_MEM_EXTRA_M));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
/**
 * cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm_id: VM ID to flush
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
		WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
	} else {
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
		data |= 0xff000000;
		if (data != orig)
			WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
	}
}

static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data |= 0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	} else {
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
		orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
		data &= ~0x100;
		if (orig != data)
			WREG32(mmSDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
	}
}
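/*
 * Note (added commentary, a reading of the code above): medium-grain
 * clock gating is enabled by writing 0x00000100 to SDMA0_CLK_CTRL and
 * disabled by setting the 0xff000000 override bits; bit 0x100 of
 * SDMA0_POWER_CNTL toggles memory light sleep. Both instances are always
 * programmed together.
 */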
static int cik_sdma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = SDMA_MAX_INSTANCE;

	cik_sdma_set_ring_funcs(adev);
	cik_sdma_set_irq_funcs(adev);
	cik_sdma_set_buffer_funcs(adev);
	cik_sdma_set_vm_pte_funcs(adev);

	return 0;
}

static int cik_sdma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	r = cik_sdma_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 :
				     AMDGPU_SDMA_IRQ_TRAP1);
		if (r)
			return r;
	}

	return r;
}
static int cik_sdma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	cik_sdma_free_microcode(adev);
	return 0;
}

static int cik_sdma_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = cik_sdma_start(adev);
	if (r)
		return r;

	return r;
}

static int cik_sdma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_enable(adev, false);

	return 0;
}

static int cik_sdma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return cik_sdma_hw_fini(adev);
}

static int cik_sdma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cik_sdma_soft_reset(handle);

	return cik_sdma_hw_init(adev);
}

static bool cik_sdma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int cik_sdma_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
						SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static int cik_sdma_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA0_F32_CNTL__HALT_MASK;
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
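/*
 * Note (added commentary): the sequence above halts the busy engine's
 * F32 core, asserts the matching SRBM_SOFT_RESET bit, waits 50us,
 * deasserts it and waits again; the SRBM_SOFT_RESET read-backs ensure
 * each write has landed before the delay starts.
 */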
static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl &= ~SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl |= SDMA0_CNTL__TRAP_ENABLE_MASK;
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}
static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int cik_sdma_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	cik_enable_sdma_mgcg(adev, gate);
	cik_enable_sdma_mgls(adev, gate);

	return 0;
}

static int cik_sdma_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
	.name = "cik_sdma",
	.early_init = cik_sdma_early_init,
	.late_init = NULL,
	.sw_init = cik_sdma_sw_init,
	.sw_fini = cik_sdma_sw_fini,
	.hw_init = cik_sdma_hw_init,
	.hw_fini = cik_sdma_hw_fini,
	.suspend = cik_sdma_suspend,
	.resume = cik_sdma_resume,
	.is_idle = cik_sdma_is_idle,
	.wait_for_idle = cik_sdma_wait_for_idle,
	.soft_reset = cik_sdma_soft_reset,
	.set_clockgating_state = cik_sdma_set_clockgating_state,
	.set_powergating_state = cik_sdma_set_powergating_state,
};

static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = cik_sdma_ring_get_rptr,
	.get_wptr = cik_sdma_ring_get_wptr,
	.set_wptr = cik_sdma_ring_set_wptr,
	.emit_frame_size =
		6 + /* cik_sdma_ring_emit_hdp_flush */
		3 + /* cik_sdma_ring_emit_hdp_invalidate */
		6 + /* cik_sdma_ring_emit_pipeline_sync */
		12 + /* cik_sdma_ring_emit_vm_flush */
		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
	.emit_ib = cik_sdma_ring_emit_ib,
	.emit_fence = cik_sdma_ring_emit_fence,
	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
	.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
	.test_ring = cik_sdma_ring_test_ring,
	.test_ib = cik_sdma_ring_test_ib,
	.insert_nop = cik_sdma_ring_insert_nop,
	.pad_ib = cik_sdma_ring_pad_ib,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
	.set = cik_sdma_set_trap_irq_state,
	.process = cik_sdma_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
	.process = cik_sdma_process_illegal_inst_irq,
};

static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
}
/**
 * cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (CIK).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
				      uint64_t src_offset,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
 * cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill with commands
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (CIK).
 */
static void cik_sdma_emit_fill_buffer(struct amdgpu_ib *ib,
				      uint32_t src_data,
				      uint64_t dst_offset,
				      uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_CONSTANT_FILL, 0, 0);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = cik_sdma_emit_copy_buffer,
	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = cik_sdma_emit_fill_buffer,
};
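/*
 * Note (added commentary): copy_num_dw = 7 and fill_num_dw = 5 match the
 * DW counts emitted by cik_sdma_emit_copy_buffer() and
 * cik_sdma_emit_fill_buffer() above; the TTM layer uses them to size the
 * IBs it builds for buffer moves and clears.
 */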
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}

static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
	.copy_pte = cik_sdma_vm_copy_pte,
	.write_pte = cik_sdma_vm_write_pte,
	.set_pte_pde = cik_sdma_vm_set_pte_pde,
};

static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
		for (i = 0; i < adev->sdma.num_instances; i++)
			adev->vm_manager.vm_pte_rings[i] =
				&adev->sdma.instance[i].ring;
		adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
	}
}

const struct amdgpu_ip_block_version cik_sdma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &cik_sdma_ip_funcs,
};