amdgpu_cgs.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
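
/*
 * Report the MC (GPU) address range of the requested memory pool and how
 * much of it is still available, i.e. total size minus what is pinned.
 */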
static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
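
/*
 * Wrap a vmalloc'ed kernel buffer in a GTT buffer object and pin it into
 * the [min_offset, max_offset] range so the GPU can address it; returns the
 * BO handle and the resulting MC address.
 */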
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
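
/*
 * Allocate a buffer object in the requested pool. The min/max offsets are
 * turned into a single restricted TTM placement; size and alignment must be
 * non-zero and the alignment a power of two.
 */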
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
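
/*
 * Pin a buffer object into the range encoded in its first placement and
 * return the resulting MC address.
 */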
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
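
/*
 * MMIO and indirect register accessors, thin wrappers around the
 * RREG32/WREG32 family of macros.
 */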
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
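
/*
 * PCI config space accessors. Failed reads trigger a WARN and return 0,
 * since the CGS interface has no way to propagate the error to the caller.
 */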
static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}
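
/*
 * ATOM BIOS helpers: look up a data table, query command table revisions
 * and execute command tables through the atom interpreter.
 */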
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented\n");
	return -EPERM;
}
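
/*
 * IRQ glue: a cgs_irq_params block is attached to each amdgpu_irq_src and
 * forwards the driver's set/process callbacks to the handlers registered
 * by the CGS client.
 */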
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
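
/* Register a new interrupt source with amdgpu on behalf of the CGS client. */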
static int amdgpu_cgs_add_irq_source(void *cgs_device,
				     unsigned client_id,
				     unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
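
/* Map a CGS ucode id onto the corresponding amdgpu ucode id. */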
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* For VI, JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched, but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it is not created by CGS */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT1:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
				      bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}
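
/*
 * Fill in a cgs_firmware_info for the requested ucode. Non-SMU firmware is
 * looked up in adev->firmware.ucode; SMU firmware is requested from user
 * space on first use, selecting a "kicker" image for known device/revision
 * combinations.
 */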
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = (header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		if (CGS_UCODE_ID_SMU_SK == type)
			amdgpu_cgs_rel_firmware(cgs_device, CGS_UCODE_ID_SMU);

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}
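
/*
 * Answer CGS queries about the adapter: BDF, PCIe capabilities, CG/PG
 * flags, CU/SE counts and subsystem ids.
 */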
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
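
/*
 * Collect the mask and count of enabled CRTCs and, for the first active
 * CRTC with a valid mode, fill in vblank timing, refresh rate and the
 * reference clock for the caller.
 */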
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					       amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					       amdgpu_crtc->hw_mode.crtc_vdisplay +
					       (amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief Evaluate an ACPI namespace object; the handle or pathname must be valid.
 * \param cgs_device
 * \param info input/output arguments for the control method
 * \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif
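
/*
 * Convenience wrapper: package an ACPI function index plus an input buffer
 * as the two method arguments and a single buffer for the output, then
 * evaluate the named method.
 */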
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
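
/* Dispatch tables handed out to CGS clients (e.g. the powerplay code). */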
static const struct cgs_ops amdgpu_cgs_ops = {
	.gpu_mem_info = amdgpu_cgs_gpu_mem_info,
	.gmap_kmem = amdgpu_cgs_gmap_kmem,
	.gunmap_kmem = amdgpu_cgs_gunmap_kmem,
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.read_pci_config_byte = amdgpu_cgs_read_pci_config_byte,
	.read_pci_config_word = amdgpu_cgs_read_pci_config_word,
	.read_pci_config_dword = amdgpu_cgs_read_pci_config_dword,
	.write_pci_config_byte = amdgpu_cgs_write_pci_config_byte,
	.write_pci_config_word = amdgpu_cgs_write_pci_config_word,
	.write_pci_config_dword = amdgpu_cgs_write_pci_config_dword,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.create_pm_request = amdgpu_cgs_create_pm_request,
	.destroy_pm_request = amdgpu_cgs_destroy_pm_request,
	.set_pm_request = amdgpu_cgs_set_pm_request,
	.pm_request_clock = amdgpu_cgs_pm_request_clock,
	.pm_request_engine = amdgpu_cgs_pm_request_engine,
	.pm_query_clock_limits = amdgpu_cgs_pm_query_clock_limits,
	.set_camera_voltages = amdgpu_cgs_set_camera_voltages,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};
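
/* Create and destroy the CGS wrapper around an amdgpu device. */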
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}