amdgpu_cgs.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};
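
/*
 * Every CGS callback receives an opaque cgs_device pointer; this macro
 * recovers the underlying amdgpu_device behind it.
 */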
#define CGS_FUNC_ADEV						\
	struct amdgpu_device *adev =				\
		((struct amdgpu_cgs_device *)cgs_device)->adev
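
/*
 * Report the MC address range and available size of the requested memory
 * pool (visible/invisible VRAM or GART).
 */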
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
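
/*
 * Wrap an existing kernel (vmalloc) allocation in a GTT buffer object and
 * pin it into the GART so the GPU can address it; returns the BO handle
 * and its MC address.
 */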
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
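
/*
 * Allocate a buffer object in the pool selected by @type, restricted to the
 * requested [min_offset, max_offset) range via a single TTM placement.
 */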
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
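
/*
 * Pin a previously allocated buffer object within the range recorded in its
 * single TTM placement and return the resulting MC address.
 */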
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}
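
/*
 * Indirect register access: dispatch to the per-space accessor (indexed MMIO,
 * PCIE, SMC, UVD context, DIDT); the audio endpoint space is not wired up.
 */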
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
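
/*
 * Glue between the amdgpu interrupt framework and CGS clients: the client's
 * set/handler callbacks and private data are stashed in the irq source.
 */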
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
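
/*
 * Register a CGS client as an amdgpu interrupt source for @src_id; both
 * allocations are freed again if amdgpu_irq_add_id() fails.
 */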
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

int amdgpu_cgs_set_clockgating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

int amdgpu_cgs_set_powergating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
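
/* Map a CGS firmware type onto the corresponding amdgpu ucode ID. */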
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}
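
/*
 * For non-SMC firmware, return the address, size and version of the ucode
 * the driver has already loaded; for SMC firmware, request and validate the
 * per-ASIC binary and hand back a kernel pointer to its image.
 */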
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		case CHIP_FIJI:
			strcpy(fw_name, "amdgpu/fiji_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_query_system_info(void *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
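
/*
 * Build a mask of enabled CRTCs and, when info->mode_info is supplied,
 * report vblank time, refresh rate and reference clock per active display.
 */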
static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;

	if (info == NULL)
		return -EINVAL;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (info->mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
				info->mode_info++;
			}
		}
	}

	return 0;
}

/** \brief Evaluate an ACPI namespace object; the device's ACPI handle must be valid
 * \param cgs_device  opaque CGS device handle
 * \param info        input/output arguments for the control method
 * \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params = NULL;
	union acpi_object *obj = NULL;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument = NULL;
	uint32_t i, count;
	acpi_status status;
	int result = 0;	/* initialized so the success path does not return an indeterminate value */
	uint32_t func_no = 0xFFFFFFFF;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		func_no = argument->value;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->method_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->method_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto error;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto error;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto error;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto error;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto error;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto error;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

error:
	if (obj != NULL)
		kfree(obj);
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif
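
/*
 * Convenience wrapper: package an ACPI function number plus input/output
 * buffers into the argument list expected by amdgpu_cgs_acpi_eval_object().
 */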
int amdgpu_cgs_call_acpi_method(void *cgs_device,
				uint32_t acpi_method,
				uint32_t acpi_function,
				void *pinput, void *poutput,
				uint32_t output_count,
				uint32_t input_size,
				uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].method_length = sizeof(uint32_t);
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
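
/*
 * Allocate a CGS device wrapper that binds the shared cgs_ops/cgs_os_ops
 * tables to a specific amdgpu_device.
 */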
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}