/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

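/*
 * CGS ("common graphics services") is a thin abstraction layer that lets
 * device-independent components such as the powerplay code call back into
 * the driver without knowing about struct amdgpu_device. Every callback
 * below receives a struct cgs_device, which is really the base member of
 * the wrapper defined here; CGS_FUNC_ADEV recovers the amdgpu_device.
 */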
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

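/*
 * Map a kernel virtual allocation into the GPU address space: wrap the
 * backing page(s) in an sg_table, create a GTT buffer object around it and
 * pin it in the caller-supplied [min_offset, max_offset) window. Note that
 * only the first page is looked up via vmalloc_to_page(); callers appear
 * to be expected to pass a single-page or physically contiguous buffer.
 */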
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

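/*
 * Allocate a buffer object for one of the CGS memory pools. The pool type
 * selects the TTM domain and caching flags, while min_offset/max_offset
 * restrict the placement window (converted to pages) within that domain;
 * the invisible-FB cases additionally clamp the window to the part of VRAM
 * that is not CPU accessible.
 */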
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

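/*
 * Pinning helpers for already-allocated CGS buffers: gmap pins the buffer
 * within the page range recorded in its first placement and returns the
 * resulting MC address, gunmap drops the pin again. Only a single
 * placement is expected here, hence the WARN_ON_ONCE below.
 */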
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

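/*
 * MMIO and indirect register access. The indirect spaces (PCIE, SMC,
 * UVD_CTX, DIDT) go through the index/data register pairs wrapped by the
 * RREG32_* / WREG32_* macros in amdgpu.h.
 */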
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

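/*
 * AtomBIOS accessors: look up data tables, query command table revisions
 * and execute command tables in the adapter's parsed BIOS image.
 */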
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}

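/*
 * IRQ forwarding: CGS clients register a (set, handler) pair per source id.
 * The pair is stashed in a cgs_irq_params and hung off a dynamically
 * allocated amdgpu_irq_src whose funcs simply trampoline back to the
 * client's callbacks.
 */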
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

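/*
 * Translate a CGS firmware id into the matching AMDGPU_UCODE_ID. The MEC
 * jump tables live inside the MEC images, so JT1 maps to MEC1; JT2 maps to
 * MEC2 only on Tonga and Polaris, presumably because only those ASICs here
 * carry a separate second MEC firmware.
 */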
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
		    || adev->asic_type == CHIP_POLARIS10)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		return 0;
	}
	/* cannot release other firmware because they are not created by cgs */
	return -EINVAL;
}

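/*
 * For non-SMU firmwares the image has already been fetched by the IP
 * blocks, so this only computes the GPU address and size (with a special
 * case for the MEC jump tables). SMU firmware, by contrast, is requested
 * and validated here on first use and handed back as a kernel pointer.
 */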
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TONGA:
				strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

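/*
 * Walk the CRTC list to build the active display mask and, for the first
 * enabled CRTC with a valid mode, derive the vertical blank duration:
 * line time in us = crtc_htotal * 1000 / clock (the mode clock is in kHz),
 * multiplied by the number of blanking lines including vertical borders.
 */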
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params = NULL;
	union acpi_object *obj = NULL;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument = NULL;
	uint32_t i, count;
	acpi_status status;
	int result = 0;
	uint32_t func_no = 0xFFFFFFFF;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		func_no = argument->value;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->method_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->method_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto error;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto error;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto error;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto error;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto error;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto error;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

error:
	if (obj != NULL)
		kfree(obj);
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

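/*
 * Convenience wrapper around amdgpu_cgs_acpi_eval_object() for the common
 * two-argument control method call: argument 0 carries the integer
 * function code, argument 1 the input buffer, and a single output buffer
 * is expected back from the method.
 */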
int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				uint32_t acpi_method,
				uint32_t acpi_function,
				void *pinput, void *poutput,
				uint32_t output_count,
				uint32_t input_size,
				uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].method_length = sizeof(uint32_t);
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

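/*
 * The two vtables handed to CGS clients. The initializers are positional,
 * so the order here must match the member order of struct cgs_ops and
 * struct cgs_os_ops in the CGS headers.
 */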
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}