/* drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c */
  1. /*
  2. * Copyright 2015 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. *
  23. */
  24. #include <linux/pci.h>
  25. #include "amdgpu.h"
  26. #include "cgs_linux.h"
/*
 * Wrapper binding the opaque CGS device handle to a specific amdgpu
 * device.  The void *cgs_device handle passed to every callback below
 * is a pointer to this struct (see amdgpu_cgs_create_device()), and
 * CGS_FUNC_ADEV recovers @adev from it.
 */
struct amdgpu_cgs_device {
	struct cgs_device base;		/* embedded generic CGS device */
	struct amdgpu_device *adev;	/* backing amdgpu device */
};
/*
 * Declare and initialize a local @adev from the opaque @cgs_device
 * handle.  Assumes the handle is a struct amdgpu_cgs_device * as
 * returned by amdgpu_cgs_create_device().  Note this expands to a
 * declaration, so it must appear in the declaration section of the
 * calling function.
 */
#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
/* TODO: report MC start/size and available size for @type; stub for now. */
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	return 0;
}

/* TODO: map kernel memory @kmem into the GPU address space; stub for now. */
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	return 0;
}

/* TODO: undo amdgpu_cgs_gmap_kmem(); stub for now. */
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	return 0;
}

/* TODO: allocate GPU memory of @type within [min_offset, max_offset); stub. */
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	return 0;
}

/* TODO: import a dma-buf fd as GPU memory. */
static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
				     cgs_handle_t *handle)
{
	/* TODO */
	return 0;
}

/* TODO: free GPU memory previously allocated/imported. */
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	/* TODO */
	return 0;
}

/* TODO: GPU-map the buffer and return its MC address in @mcaddr. */
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	/* TODO */
	return 0;
}

/* TODO: undo amdgpu_cgs_gmap_gpu_mem(). */
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	/* TODO */
	return 0;
}

/* TODO: CPU-map the buffer and return the kernel VA in @map. */
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	/* TODO */
	return 0;
}

/* TODO: undo amdgpu_cgs_kmap_gpu_mem(). */
static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	/* TODO */
	return 0;
}
/* Read the 32-bit MMIO register at @offset. */
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

/* Write @value to the 32-bit MMIO register at @offset. */
static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}
/*
 * Read a register in the indirect register bank selected by @space.
 * Returns 0 for the unimplemented AUDIO_ENDPT bank and for an
 * unrecognized @space (with a WARN in the latter case).
 */
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	/* reached only for an enum value not handled above */
	WARN(1, "Invalid indirect register space");
	return 0;
}
  126. static void amdgpu_cgs_write_ind_register(void *cgs_device,
  127. enum cgs_ind_reg space,
  128. unsigned index, uint32_t value)
  129. {
  130. CGS_FUNC_ADEV;
  131. switch (space) {
  132. case CGS_IND_REG__MMIO:
  133. return WREG32_IDX(index, value);
  134. case CGS_IND_REG__PCIE:
  135. return WREG32_PCIE(index, value);
  136. case CGS_IND_REG__SMC:
  137. return WREG32_SMC(index, value);
  138. case CGS_IND_REG__UVD_CTX:
  139. return WREG32_UVD_CTX(index, value);
  140. case CGS_IND_REG__DIDT:
  141. return WREG32_DIDT(index, value);
  142. case CGS_IND_REG__AUDIO_ENDPT:
  143. DRM_ERROR("audio endpt register access not implemented.\n");
  144. return;
  145. }
  146. WARN(1, "Invalid indirect register space");
  147. }
  148. static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
  149. {
  150. CGS_FUNC_ADEV;
  151. uint8_t val;
  152. int ret = pci_read_config_byte(adev->pdev, addr, &val);
  153. if (WARN(ret, "pci_read_config_byte error"))
  154. return 0;
  155. return val;
  156. }
  157. static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
  158. {
  159. CGS_FUNC_ADEV;
  160. uint16_t val;
  161. int ret = pci_read_config_word(adev->pdev, addr, &val);
  162. if (WARN(ret, "pci_read_config_word error"))
  163. return 0;
  164. return val;
  165. }
  166. static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
  167. unsigned addr)
  168. {
  169. CGS_FUNC_ADEV;
  170. uint32_t val;
  171. int ret = pci_read_config_dword(adev->pdev, addr, &val);
  172. if (WARN(ret, "pci_read_config_dword error"))
  173. return 0;
  174. return val;
  175. }
  176. static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
  177. uint8_t value)
  178. {
  179. CGS_FUNC_ADEV;
  180. int ret = pci_write_config_byte(adev->pdev, addr, value);
  181. WARN(ret, "pci_write_config_byte error");
  182. }
  183. static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
  184. uint16_t value)
  185. {
  186. CGS_FUNC_ADEV;
  187. int ret = pci_write_config_word(adev->pdev, addr, value);
  188. WARN(ret, "pci_write_config_word error");
  189. }
  190. static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
  191. uint32_t value)
  192. {
  193. CGS_FUNC_ADEV;
  194. int ret = pci_write_config_dword(adev->pdev, addr, value);
  195. WARN(ret, "pci_write_config_dword error");
  196. }
/* TODO: look up an ATOM BIOS data table; returns NULL until implemented. */
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	/* TODO */
	return NULL;
}

/* TODO: query format/content revisions of an ATOM command table. */
static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	/* TODO */
	return 0;
}

/* TODO: execute an ATOM BIOS command table with @args. */
static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	/* TODO */
	return 0;
}

/* TODO: create a power-management request handle. */
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

/* TODO: destroy a power-management request handle. */
static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

/* TODO: activate/deactivate a power-management request. */
static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

/* TODO: request a clock frequency as part of a PM request. */
static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

/* TODO: request power state for an engine as part of a PM request. */
static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

/* TODO: report min/max/sustainable limits for @clock. */
static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

/* Deliberately unsupported on amdgpu: always fails with -EPERM. */
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
/*
 * Per-source bookkeeping attached to an amdgpu_irq_src (via ->data)
 * when a CGS client registers an interrupt source through
 * amdgpu_cgs_add_irq_source().
 */
struct cgs_irq_params {
	unsigned src_id;		/* source id the client registered */
	cgs_irq_source_set_func_t set;	/* client enable/disable callback */
	cgs_irq_handler_func_t handler;	/* client interrupt handler */
	void *private_data;		/* opaque client context passed back */
};
  263. static int cgs_set_irq_state(struct amdgpu_device *adev,
  264. struct amdgpu_irq_src *src,
  265. unsigned type,
  266. enum amdgpu_interrupt_state state)
  267. {
  268. struct cgs_irq_params *irq_params =
  269. (struct cgs_irq_params *)src->data;
  270. if (!irq_params)
  271. return -EINVAL;
  272. if (!irq_params->set)
  273. return -EINVAL;
  274. return irq_params->set(irq_params->private_data,
  275. irq_params->src_id,
  276. type,
  277. (int)state);
  278. }
  279. static int cgs_process_irq(struct amdgpu_device *adev,
  280. struct amdgpu_irq_src *source,
  281. struct amdgpu_iv_entry *entry)
  282. {
  283. struct cgs_irq_params *irq_params =
  284. (struct cgs_irq_params *)source->data;
  285. if (!irq_params)
  286. return -EINVAL;
  287. if (!irq_params->handler)
  288. return -EINVAL;
  289. return irq_params->handler(irq_params->private_data,
  290. irq_params->src_id,
  291. entry->iv_entry);
  292. }
/* IRQ source ops routing amdgpu interrupt events to CGS clients. */
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
  297. static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
  298. unsigned num_types,
  299. cgs_irq_source_set_func_t set,
  300. cgs_irq_handler_func_t handler,
  301. void *private_data)
  302. {
  303. CGS_FUNC_ADEV;
  304. int ret = 0;
  305. struct cgs_irq_params *irq_params;
  306. struct amdgpu_irq_src *source =
  307. kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
  308. if (!source)
  309. return -ENOMEM;
  310. irq_params =
  311. kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
  312. if (!irq_params) {
  313. kfree(source);
  314. return -ENOMEM;
  315. }
  316. source->num_types = num_types;
  317. source->funcs = &cgs_irq_funcs;
  318. irq_params->src_id = src_id;
  319. irq_params->set = set;
  320. irq_params->handler = handler;
  321. irq_params->private_data = private_data;
  322. source->data = (void *)irq_params;
  323. ret = amdgpu_irq_add_id(adev, src_id, source);
  324. if (ret) {
  325. kfree(irq_params);
  326. kfree(source);
  327. }
  328. return ret;
  329. }
  330. static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
  331. {
  332. CGS_FUNC_ADEV;
  333. return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
  334. }
  335. static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
  336. {
  337. CGS_FUNC_ADEV;
  338. return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
  339. }
/*
 * CGS device callback table.  NOTE: positional initializers — the entry
 * order must exactly match the member order of struct cgs_ops declared
 * in the CGS headers; keep both in sync when adding callbacks.
 */
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages
};
/*
 * OS-specific CGS callbacks.  Positional initializers — the entry order
 * must match struct cgs_os_ops in cgs_linux.h.
 */
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_import_gpu_mem,
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
  377. void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
  378. {
  379. struct amdgpu_cgs_device *cgs_device =
  380. kmalloc(sizeof(*cgs_device), GFP_KERNEL);
  381. if (!cgs_device) {
  382. DRM_ERROR("Couldn't allocate CGS device structure\n");
  383. return NULL;
  384. }
  385. cgs_device->base.ops = &amdgpu_cgs_ops;
  386. cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
  387. cgs_device->adev = adev;
  388. return cgs_device;
  389. }
/*
 * Free a wrapper created by amdgpu_cgs_create_device().  Does not
 * touch the underlying amdgpu device.
 */
void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}