  1. /*
  2. * Copyright 2008 Advanced Micro Devices, Inc.
  3. * Copyright 2008 Red Hat Inc.
  4. * Copyright 2009 Jerome Glisse.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the "Software"),
  8. * to deal in the Software without restriction, including without limitation
  9. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10. * and/or sell copies of the Software, and to permit persons to whom the
  11. * Software is furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22. * OTHER DEALINGS IN THE SOFTWARE.
  23. *
  24. * Authors: Dave Airlie
  25. * Alex Deucher
  26. * Jerome Glisse
  27. */
  28. #include <linux/seq_file.h>
  29. #include <linux/slab.h>
  30. #include <drm/drmP.h>
  31. #include "radeon.h"
  32. #include "radeon_asic.h"
  33. #include "rs400d.h"
  34. /* This files gather functions specifics to : rs400,rs480 */
  35. static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
  36. void rs400_gart_adjust_size(struct radeon_device *rdev)
  37. {
  38. /* Check gart size */
  39. switch (rdev->mc.gtt_size/(1024*1024)) {
  40. case 32:
  41. case 64:
  42. case 128:
  43. case 256:
  44. case 512:
  45. case 1024:
  46. case 2048:
  47. break;
  48. default:
  49. DRM_ERROR("Unable to use IGP GART size %uM\n",
  50. (unsigned)(rdev->mc.gtt_size >> 20));
  51. DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
  52. DRM_ERROR("Forcing to 32M GART size\n");
  53. rdev->mc.gtt_size = 32 * 1024 * 1024;
  54. return;
  55. }
  56. }
  57. void rs400_gart_tlb_flush(struct radeon_device *rdev)
  58. {
  59. uint32_t tmp;
  60. unsigned int timeout = rdev->usec_timeout;
  61. WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
  62. do {
  63. tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
  64. if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
  65. break;
  66. DRM_UDELAY(1);
  67. timeout--;
  68. } while (timeout > 0);
  69. WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
  70. }
  71. int rs400_gart_init(struct radeon_device *rdev)
  72. {
  73. int r;
  74. if (rdev->gart.ptr) {
  75. WARN(1, "RS400 GART already initialized\n");
  76. return 0;
  77. }
  78. /* Check gart size */
  79. switch(rdev->mc.gtt_size / (1024 * 1024)) {
  80. case 32:
  81. case 64:
  82. case 128:
  83. case 256:
  84. case 512:
  85. case 1024:
  86. case 2048:
  87. break;
  88. default:
  89. return -EINVAL;
  90. }
  91. /* Initialize common gart structure */
  92. r = radeon_gart_init(rdev);
  93. if (r)
  94. return r;
  95. if (rs400_debugfs_pcie_gart_info_init(rdev))
  96. DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
  97. rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
  98. return radeon_gart_table_ram_alloc(rdev);
  99. }
/*
 * rs400_gart_enable - program and turn on the IGP GART.
 *
 * Programs the aperture size, AGP base/location registers (with
 * RS690/RS740 using the MC-indirect variants), the page-table base
 * address, TLB features and snoop control, then enables the GART and
 * flushes the TLB.  The register write ordering follows the hardware
 * bring-up sequence and must not be rearranged.
 *
 * Returns 0 on success, -EINVAL if the GTT size is unsupported.
 */
int rs400_gart_enable(struct radeon_device *rdev)
{
	uint32_t size_reg;
	uint32_t tmp;

	/* Make out-of-aperture GART accesses fault instead of leaking
	 * onto the PCI bus. */
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
	/* Check gart size */
	switch(rdev->mc.gtt_size / (1024 * 1024)) {
	case 32:
		size_reg = RS480_VA_SIZE_32MB;
		break;
	case 64:
		size_reg = RS480_VA_SIZE_64MB;
		break;
	case 128:
		size_reg = RS480_VA_SIZE_128MB;
		break;
	case 256:
		size_reg = RS480_VA_SIZE_256MB;
		break;
	case 512:
		size_reg = RS480_VA_SIZE_512MB;
		break;
	case 1024:
		size_reg = RS480_VA_SIZE_1GB;
		break;
	case 2048:
		size_reg = RS480_VA_SIZE_2GB;
		break;
	default:
		return -EINVAL;
	}
	/* It should be fine to program it to max value */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
		WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
	} else {
		WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
		WREG32(RS480_AGP_BASE_2, 0);
	}
	/* Place the GART aperture at [gtt_start, gtt_end] (in 64KB units). */
	tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
	tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
		/* Ensure bus mastering is enabled. */
		tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	} else {
		WREG32(RADEON_MC_AGP_LOCATION, tmp);
		tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
		WREG32(RADEON_BUS_CNTL, tmp);
	}
	/* Table should be in 32bits address space so ignore bits above. */
	tmp = (u32)rdev->gart.table_addr & 0xfffff000;
	tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
	WREG32_MC(RS480_GART_BASE, tmp);
	/* TODO: more tweaking here */
	WREG32_MC(RS480_GART_FEATURE_ID,
		  (RS480_TLB_ENABLE |
		   RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
	/* Disable snooping */
	WREG32_MC(RS480_AGP_MODE_CNTL,
		  (1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
	/* Disable AGP mode */
	/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
	 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	} else {
		tmp = RREG32_MC(RS480_MC_MISC_CNTL);
		tmp |= RS480_GART_INDEX_REG_EN;
		WREG32_MC(RS480_MC_MISC_CNTL, tmp);
	}
	/* Enable gart */
	WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
	rs400_gart_tlb_flush(rdev);
	/* NOTE(review): message says "PCIE GART" but this is the IGP
	 * (AGP-style) GART; kept as-is since it is a runtime string. */
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
  184. void rs400_gart_disable(struct radeon_device *rdev)
  185. {
  186. uint32_t tmp;
  187. tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
  188. tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
  189. WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
  190. WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
  191. }
/*
 * rs400_gart_fini - tear down the IGP GART.
 *
 * Releases the common GART bookkeeping, disables the hardware
 * aperture, then frees the system-RAM page table.
 */
void rs400_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	rs400_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}
  198. #define RS400_PTE_UNSNOOPED (1 << 0)
  199. #define RS400_PTE_WRITEABLE (1 << 2)
  200. #define RS400_PTE_READABLE (1 << 3)
  201. void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
  202. uint64_t addr, uint32_t flags)
  203. {
  204. uint32_t entry;
  205. u32 *gtt = rdev->gart.ptr;
  206. entry = (lower_32_bits(addr) & PAGE_MASK) |
  207. ((upper_32_bits(addr) & 0xff) << 4);
  208. if (flags & RADEON_GART_PAGE_READ)
  209. entry |= RS400_PTE_READABLE;
  210. if (flags & RADEON_GART_PAGE_WRITE)
  211. entry |= RS400_PTE_WRITEABLE;
  212. if (!(flags & RADEON_GART_PAGE_SNOOP))
  213. entry |= RS400_PTE_UNSNOOPED;
  214. entry = cpu_to_le32(entry);
  215. gtt[i] = entry;
  216. }
  217. int rs400_mc_wait_for_idle(struct radeon_device *rdev)
  218. {
  219. unsigned i;
  220. uint32_t tmp;
  221. for (i = 0; i < rdev->usec_timeout; i++) {
  222. /* read MC_STATUS */
  223. tmp = RREG32(RADEON_MC_STATUS);
  224. if (tmp & RADEON_MC_IDLE) {
  225. return 0;
  226. }
  227. DRM_UDELAY(1);
  228. }
  229. return -1;
  230. }
  231. static void rs400_gpu_init(struct radeon_device *rdev)
  232. {
  233. /* FIXME: is this correct ? */
  234. r420_pipes_init(rdev);
  235. if (rs400_mc_wait_for_idle(rdev)) {
  236. printk(KERN_WARNING "rs400: Failed to wait MC idle while "
  237. "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
  238. }
  239. }
  240. static void rs400_mc_init(struct radeon_device *rdev)
  241. {
  242. u64 base;
  243. rs400_gart_adjust_size(rdev);
  244. rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
  245. /* DDR for all card after R300 & IGP */
  246. rdev->mc.vram_is_ddr = true;
  247. rdev->mc.vram_width = 128;
  248. r100_vram_init_sizes(rdev);
  249. base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
  250. radeon_vram_location(rdev, &rdev->mc, base);
  251. rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
  252. radeon_gtt_location(rdev, &rdev->mc);
  253. radeon_update_bandwidth_info(rdev);
  254. }
  255. uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
  256. {
  257. unsigned long flags;
  258. uint32_t r;
  259. spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  260. WREG32(RS480_NB_MC_INDEX, reg & 0xff);
  261. r = RREG32(RS480_NB_MC_DATA);
  262. WREG32(RS480_NB_MC_INDEX, 0xff);
  263. spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  264. return r;
  265. }
  266. void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
  267. {
  268. unsigned long flags;
  269. spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  270. WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
  271. WREG32(RS480_NB_MC_DATA, (v));
  272. WREG32(RS480_NB_MC_INDEX, 0xff);
  273. spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  274. }
#if defined(CONFIG_DEBUG_FS)
/*
 * rs400_debugfs_gart_info - dump the GART-related register state.
 *
 * seq_file show callback for the "rs400_gart_info" debugfs entry.
 * Prints AGP/GART configuration registers (family-specific for
 * RS690/RS740) followed by the GART error/diagnostic registers.
 * The output order is part of the debugfs contract; do not reorder.
 */
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
	seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
	/* RS690/RS740 keep the AGP registers in the MC indirect space. */
	if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
		seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
		tmp = RREG32(RS690_HDP_FB_LOCATION);
		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
	} else {
		tmp = RREG32(RADEON_AGP_BASE);
		seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
		tmp = RREG32(RS480_AGP_BASE_2);
		seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
		tmp = RREG32(RADEON_MC_AGP_LOCATION);
		seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	}
	tmp = RREG32_MC(RS480_GART_BASE);
	seq_printf(m, "GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_FEATURE_ID);
	seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
	seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_MC_MISC_CNTL);
	seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
	/* The registers below use raw MC indices with no symbolic define
	 * in rs400d.h; labels follow the register spec (presumably —
	 * verify against the RS480 documentation). */
	tmp = RREG32_MC(0x5F);
	seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
	seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
	tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
	seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3B);
	seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
	tmp = RREG32_MC(0x3C);
	seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
	tmp = RREG32_MC(0x30);
	seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
	tmp = RREG32_MC(0x31);
	seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
	tmp = RREG32_MC(0x32);
	seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
	tmp = RREG32_MC(0x33);
	seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
	tmp = RREG32_MC(0x34);
	seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
	tmp = RREG32_MC(0x35);
	seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
	tmp = RREG32_MC(0x36);
	seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
	tmp = RREG32_MC(0x37);
	seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
	return 0;
}

/* Single-entry table registered with the drm debugfs infrastructure. */
static struct drm_info_list rs400_gart_info_list[] = {
	{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif
/*
 * rs400_debugfs_pcie_gart_info_init - register the GART debugfs file.
 *
 * Returns the radeon_debugfs_add_files() result, or 0 when debugfs
 * support is compiled out.
 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
	return 0;
#endif
}
  355. static void rs400_mc_program(struct radeon_device *rdev)
  356. {
  357. struct r100_mc_save save;
  358. /* Stops all mc clients */
  359. r100_mc_stop(rdev, &save);
  360. /* Wait for mc idle */
  361. if (rs400_mc_wait_for_idle(rdev))
  362. dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
  363. WREG32(R_000148_MC_FB_LOCATION,
  364. S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
  365. S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
  366. r100_mc_resume(rdev, &save);
  367. }
/*
 * rs400_startup - bring the ASIC to an accelerated, operational state.
 *
 * Shared by init and resume.  The ordering (MC -> clocks -> pipes ->
 * bus mastering -> GART -> writeback -> fences -> IRQ -> CP -> IB
 * pool) follows hardware dependencies and must be preserved.
 *
 * Returns 0 on success or a negative error code from the first
 * sub-initialization that fails.
 */
static int rs400_startup(struct radeon_device *rdev)
{
	int r;

	r100_set_common_regs(rdev);
	rs400_mc_program(rdev);
	/* Resume clock */
	r300_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs400_gpu_init(rdev);
	r100_enable_bm(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;
	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;
	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}
	/* Enable IRQ (only register the handler on the first startup) */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}
	r100_irq_set(rdev);
	/* Cache HDP control so flushes can restore it later. */
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}
	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}
	return 0;
}
/*
 * rs400_resume - power-management resume entry point.
 *
 * Disables the GART, restores clocks and the MC, resets and re-posts
 * the ASIC via the combios tables, then re-runs the common startup
 * sequence.  Returns 0 on success or the rs400_startup() error.
 */
int rs400_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not active while we reprogram the MC. */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	r300_clock_startup(rdev);
	/* setup MC before calling post tables */
	rs400_mc_program(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post (re-initialize the card from its BIOS tables) */
	radeon_combios_asic_init(rdev->ddev);
	/* Resume clock after posting */
	r300_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Startup failed: acceleration is unusable. */
		rdev->accel_working = false;
	}
	return r;
}
/*
 * rs400_suspend - power-management suspend entry point.
 *
 * Quiesces the engine in dependency order: power management, command
 * processor, writeback, interrupts, then the GART.  Always returns 0.
 */
int rs400_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	r100_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}
/*
 * rs400_fini - full driver teardown for RS400/RS480.
 *
 * Mirrors rs400_init in reverse: acceleration blocks first, then the
 * GART, IRQs, fences, the memory manager, and finally the BIOS copy.
 */
void rs400_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * rs400_init - one-time driver initialization for RS400/RS480.
 *
 * Fetches and validates the (combios-only) video BIOS, resets and
 * posts the card if needed, sets up clocks, the memory controller,
 * fences, the memory manager and the GART, then runs the common
 * startup.  On startup failure, acceleration is torn down but init
 * still returns 0 so the KMS-only path remains usable.
 *
 * Returns 0 on success or a negative error code for fatal failures.
 */
int rs400_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	r100_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* restore some register to sane defaults */
	r100_restore_sanity(rdev);
	/* BIOS*/
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* RS400/RS480 predates ATOM; an atombios here means a bad dump. */
	if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
		return -EINVAL;
	} else {
		r = radeon_combios_init(rdev);
		if (r)
			return r;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs400_mc_init(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	r300_set_reg_safe(rdev);
	/* Initialize power management */
	radeon_pm_init(rdev);
	rdev->accel_working = true;
	r = rs400_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel. */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}