/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};
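
/*
 * CGS passes an opaque struct cgs_device to every callback; the amdgpu
 * implementation wraps it in struct amdgpu_cgs_device above so each callback
 * can recover the owning amdgpu_device.  CGS_FUNC_ADEV expands to that
 * lookup and declares the local "adev" used throughout this file.
 */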

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
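
/*
 * amdgpu_cgs_gpu_mem_info() - report the MC address range and the amount of
 * memory still available (total minus pinned) for a given CGS memory type:
 * CPU-visible VRAM, CPU-invisible VRAM, or GTT.
 */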

static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
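
/*
 * amdgpu_cgs_gmap_kmem() - wrap an existing vmalloc'ed kernel buffer in a
 * GTT buffer object, pin it into the [min_offset, max_offset] window and
 * return the resulting MC address plus a handle for later unmapping.
 */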

static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
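
/*
 * amdgpu_cgs_alloc_gpu_mem() - allocate a buffer object for the requested
 * CGS memory type.  The type selects the domain (VRAM or GTT) and the TTM
 * placement flags, while min_offset/max_offset restrict the placement to a
 * page-aligned window inside that domain.
 */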

static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
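
/*
 * The four helpers below pin/unpin a previously allocated buffer object into
 * the GPU address space (gmap/gunmap) and create/destroy a CPU kernel
 * mapping of it (kmap/kunmap).  Each one reserves the BO around the
 * operation, as TTM requires.
 */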

static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
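
/*
 * MMIO and indirect register accessors.  The indirect variants dispatch on
 * the CGS register space (MMIO index, PCIE, SMC, UVD context, DIDT, GC CAC)
 * and fall through to the matching amdgpu RREG32_xxx / WREG32_xxx macros.
 */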

static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
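
/*
 * PCI configuration space accessors.  Read failures are reported with WARN
 * and return 0, so callers cannot distinguish a failed read from a register
 * that really contains zero.
 */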

static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}
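
/*
 * amdgpu_cgs_get_pci_resource() - return the bus address of the requested
 * resource (currently only the MMIO register BAR and the doorbell aperture),
 * after checking that offset + size stays within the resource.
 */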

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}
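
/*
 * ATOM BIOS helpers: look up a data table inside the parsed ATOM context,
 * query a command table's format/content revision, or execute a command
 * table with caller-provided arguments.
 */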

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
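
/*
 * IRQ plumbing: struct cgs_irq_params stores the CGS client's callbacks and
 * context.  cgs_set_irq_state() and cgs_process_irq() are the amdgpu_irq_src
 * trampolines that forward state changes and interrupt vectors to those
 * callbacks; amdgpu_cgs_add_irq_source() wires it all up.
 */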

struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
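
/*
 * Clock- and power-gating setters: walk the device's IP blocks, find the one
 * matching block_type and call its set_clockgating_state or
 * set_powergating_state hook.  -1 is returned if no valid block of that type
 * exists.
 */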

int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
					(void *)adev,
					state);
			break;
		}
	}
	return r;
}

int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
					(void *)adev,
					state);
			break;
		}
	}
	return r;
}
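
/*
 * fw_type_convert() - translate a CGS ucode id into the amdgpu ucode id used
 * to index adev->firmware.ucode[].  MEC JT2 maps to MEC2 only on Tonga and
 * Polaris parts; everything else falls back to MEC1.
 */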

static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
		    || adev->asic_type == CHIP_POLARIS10)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		return 0;
	}
	/* cannot release other firmware because they are not created by cgs */
	return -EINVAL;
}
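
/*
 * amdgpu_cgs_get_firmware_info() - fill in address, size and version data
 * for a firmware image.  For non-SMU ucode the image has already been loaded
 * by the IP blocks, so the info is read out of adev->firmware.ucode[] (with
 * the MEC jump tables carved out of the MEC image).  For SMU ucode the
 * firmware is requested from the firmware loader on first use, validated and
 * cached in adev->pm.fw.
 */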

static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU)
					strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				else if (type == CGS_UCODE_ID_SMU_SK)
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}
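
/*
 * amdgpu_cgs_query_system_info() - answer simple scalar queries from CGS
 * clients: PCI BDF/device/revision ids, PCIe gen and lane-width masks,
 * clock- and power-gating flags, and GFX CU/SE counts.
 */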

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
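
/*
 * amdgpu_cgs_get_active_displays_info() - report which CRTCs are enabled
 * (as a bitmask plus a count) and, for the first active CRTC with a valid
 * mode, the vblank time in microseconds, the refresh rate and the SPLL
 * reference clock.
 */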

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif
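
/*
 * amdgpu_cgs_call_acpi_method() - convenience wrapper around
 * amdgpu_cgs_acpi_eval_object(): packs the ACPI function number and the
 * caller's input buffer into two input arguments, points a single buffer
 * argument at poutput, and evaluates the named method.
 */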

static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
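
/*
 * The two vtables below are positional initializers matching the member
 * order of struct cgs_ops and struct cgs_os_ops declared in the CGS headers;
 * they are what client code reaches through the cgs_* wrapper macros.
 */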

static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
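
/*
 * amdgpu_cgs_create_device() / amdgpu_cgs_destroy_device() - allocate and
 * free the per-device CGS wrapper.  A minimal usage sketch (the call site is
 * illustrative only; in the kernel this is done from the amdgpu powerplay
 * and ACP setup code):
 *
 *	struct cgs_device *cgs_device = amdgpu_cgs_create_device(adev);
 *	if (!cgs_device)
 *		return -ENOMEM;
 *	... hand cgs_device to the CGS client ...
 *	amdgpu_cgs_destroy_device(cgs_device);
 */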

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}