/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include <kgd_kfd_interface.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern unsigned amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE		16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES	2

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,
	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type);
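
/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that wants to gate a block's clocks and then wait for it to go idle could
 * combine the helpers above like this. AMD_IP_BLOCK_TYPE_GFX and
 * AMD_CG_STATE_GATE come from amd_shared.h.
 *
 *	int r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					     AMD_CG_STATE_GATE);
 *	if (r)
 *		return r;
 *	if (!amdgpu_is_idle(adev, AMD_IP_BLOCK_TYPE_GFX))
 *		r = amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 */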

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type);

int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version);
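
/*
 * Example (illustrative sketch, not part of the original header): ASIC setup
 * code typically declares a version table per IP block and registers it
 * during early init. "gfx_v8_0_ip_funcs" stands in for a real amd_ip_funcs
 * table provided by the block's implementation.
 *
 *	static const struct amdgpu_ip_block_version gfx_v8_0_ip_block = {
 *		.type = AMD_IP_BLOCK_TYPE_GFX,
 *		.major = 8,
 *		.minor = 0,
 *		.rev = 0,
 *		.funcs = &gfx_v8_0_ip_funcs,
 *	};
 *
 *	r = amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
 */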

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;
	/* number of dw to reserve per operation */
	unsigned copy_num_dw;
	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);
	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;
	/* number of dw to reserve per operation */
	unsigned fill_num_dw;
	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
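
/*
 * Example (illustrative sketch, not part of the original header): an SDMA
 * engine would fill in this table so buffer migration can ride on its copy
 * and fill packets. The callback names and the max-bytes/dword-count values
 * below are placeholders, not the real values of any particular engine.
 *
 *	static const struct amdgpu_buffer_funcs sdma_buffer_funcs = {
 *		.copy_max_bytes = 0x1fffff,
 *		.copy_num_dw = 7,
 *		.emit_copy_buffer = sdma_emit_copy_buffer,
 *		.fill_max_bytes = 0x1fffff,
 *		.fill_num_dw = 5,
 *		.emit_fill_buffer = sdma_emit_fill_buffer,
 *	};
 */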

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint64_t flags); /* access flags */
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* set pte flags per asic */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
	/* get the pde for a given mc addr */
	u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
	uint32_t (*get_invalidate_req)(unsigned int vm_id);
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	bool (*prescreen_iv)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);

/*
 * Clocks
 */
#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 Khz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * GEM.
 */
#define AMDGPU_GEM_DOMAIN_MAX		0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffer or semaphore, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects are on the
 * same alignment).
 */
#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct dma_fence		*fence;
};
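
/*
 * Illustrative sketch (not part of the original header): the "room at the
 * end" check described in the comment above, written out against the two
 * structures. Offsets in amdgpu_sa_bo are relative to the managed buffer,
 * so the last olist entry's eoffset marks the end of the used region.
 *
 *	static bool sa_fits_at_end(struct amdgpu_sa_manager *sa, unsigned size)
 *	{
 *		struct amdgpu_sa_bo *last;
 *
 *		if (list_empty(&sa->olist))
 *			return size <= sa->size;
 *		last = list_last_entry(&sa->olist, struct amdgpu_sa_bo, olist);
 *		return sa->size - last->eoffset >= size;
 *	}
 */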

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * VMHUB structures, functions & helpers
 */
struct amdgpu_vmhub {
	uint32_t	ctx0_ptb_addr_lo32;
	uint32_t	ctx0_ptb_addr_hi32;
	uint32_t	vm_inv_eng0_req;
	uint32_t	vm_inv_eng0_ack;
	uint32_t	vm_context0_cntl;
	uint32_t	vm_l2_pro_fault_status;
	uint32_t	vm_l2_pro_fault_cntl;
};

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gart_size;
	u64			gart_start;
	u64			gart_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64			mc_mask;
	const struct firmware	*fw;	/* MC firmware */
	uint32_t		fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
	uint32_t		srbm_soft_reset;
	bool			prt_warning;
	uint64_t		stolen_size;
	/* apertures */
	u64			shared_aperture_start;
	u64			shared_aperture_end;
	u64			private_aperture_start;
	u64			private_aperture_end;
	/* protects concurrent invalidation */
	spinlock_t		invalidate_lock;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ			= 0x000,
	AMDGPU_DOORBELL_HIQ			= 0x001,
	AMDGPU_DOORBELL_DIQ			= 0x002,
	AMDGPU_DOORBELL_MEC_RING0		= 0x010,
	AMDGPU_DOORBELL_MEC_RING1		= 0x011,
	AMDGPU_DOORBELL_MEC_RING2		= 0x012,
	AMDGPU_DOORBELL_MEC_RING3		= 0x013,
	AMDGPU_DOORBELL_MEC_RING4		= 0x014,
	AMDGPU_DOORBELL_MEC_RING5		= 0x015,
	AMDGPU_DOORBELL_MEC_RING6		= 0x016,
	AMDGPU_DOORBELL_MEC_RING7		= 0x017,
	AMDGPU_DOORBELL_GFX_RING0		= 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0		= 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1		= 0x1E1,
	AMDGPU_DOORBELL_IH			= 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT		= 0x3FF,
	AMDGPU_DOORBELL_INVALID			= 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

/*
 * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
 */
typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
{
	/*
	 * All compute related doorbells: kiq, hiq, diq, traditional compute queues
	 * and user queues should be located in a contiguous range so that
	 * programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the whole range.
	 * Compute related doorbells are allocated from 0x00 to 0x8a.
	 */
	/* kernel scheduling */
	AMDGPU_DOORBELL64_KIQ			= 0x00,
	/* HSA interface queue and debug queue */
	AMDGPU_DOORBELL64_HIQ			= 0x01,
	AMDGPU_DOORBELL64_DIQ			= 0x02,
	/* Compute engines */
	AMDGPU_DOORBELL64_MEC_RING0		= 0x03,
	AMDGPU_DOORBELL64_MEC_RING1		= 0x04,
	AMDGPU_DOORBELL64_MEC_RING2		= 0x05,
	AMDGPU_DOORBELL64_MEC_RING3		= 0x06,
	AMDGPU_DOORBELL64_MEC_RING4		= 0x07,
	AMDGPU_DOORBELL64_MEC_RING5		= 0x08,
	AMDGPU_DOORBELL64_MEC_RING6		= 0x09,
	AMDGPU_DOORBELL64_MEC_RING7		= 0x0a,
	/* User queue doorbell range (128 doorbells) */
	AMDGPU_DOORBELL64_USERQUEUE_START	= 0x0b,
	AMDGPU_DOORBELL64_USERQUEUE_END		= 0x8a,
	/* Graphics engine */
	AMDGPU_DOORBELL64_GFX_RING0		= 0x8b,
	/*
	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
	 * Graphics voltage island aperture 1;
	 * the default non-graphics QWORD index range is 0xF0 - 0xFF inclusive.
	 */
	/* sDMA engines */
	AMDGPU_DOORBELL64_sDMA_ENGINE0		= 0xF0,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0	= 0xF1,
	AMDGPU_DOORBELL64_sDMA_ENGINE1		= 0xF2,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1	= 0xF3,
	/* Interrupt handler */
	AMDGPU_DOORBELL64_IH			= 0xF4,	/* For legacy interrupt ring buffer */
	AMDGPU_DOORBELL64_IH_RING1		= 0xF5,	/* For page migration request log */
	AMDGPU_DOORBELL64_IH_RING2		= 0xF6,	/* For page migration translation/invalidation log */
	/* VCN engine uses 32-bit doorbells */
	AMDGPU_DOORBELL64_VCN0_1		= 0xF8,	/* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
	AMDGPU_DOORBELL64_VCN2_3		= 0xF9,
	AMDGPU_DOORBELL64_VCN4_5		= 0xFA,
	AMDGPU_DOORBELL64_VCN6_7		= 0xFB,
	/* overlap the doorbell assignment with VCN as they are mutually exclusive;
	 * the VCE engine's doorbell is 32 bit and two VCE rings share one QWORD
	 */
	AMDGPU_DOORBELL64_UVD_RING0_1		= 0xF8,
	AMDGPU_DOORBELL64_UVD_RING2_3		= 0xF9,
	AMDGPU_DOORBELL64_UVD_RING4_5		= 0xFA,
	AMDGPU_DOORBELL64_UVD_RING6_7		= 0xFB,
	AMDGPU_DOORBELL64_VCE_RING0_1		= 0xFC,
	AMDGPU_DOORBELL64_VCE_RING2_3		= 0xFD,
	AMDGPU_DOORBELL64_VCE_RING4_5		= 0xFE,
	AMDGPU_DOORBELL64_VCE_RING6_7		= 0xFF,
	AMDGPU_DOORBELL64_MAX_ASSIGNMENT	= 0xFF,
	AMDGPU_DOORBELL64_INVALID		= 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */
struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	struct dma_fence		*excl;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};

/*
 * CP & rings.
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f);
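
/*
 * Example (illustrative sketch, not part of the original header): the usual
 * lifecycle of a kernel-internal job with one IB, built from the
 * declarations above. Error handling is abbreviated; "ring" and "entity"
 * are assumed to come from the caller, and AMDGPU_FENCE_OWNER_UNDEFINED is
 * assumed from amdgpu_sync.h.
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	if (r)
 *		return r;
 *	// ... write packets into job->ibs[0].ptr[] and set length_dw ...
 *	r = amdgpu_job_submit(job, ring, &entity,
 *			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 *	if (r)
 *		amdgpu_job_free(job);
 */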

/*
 * Queue manager
 */
struct amdgpu_queue_mapper {
	int		hw_ip;
	struct mutex	lock;
	/* protected by lock */
	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
};

struct amdgpu_queue_mgr {
	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
};

int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 int hw_ip, int instance, int ring,
			 struct amdgpu_ring **out_ring);
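
/*
 * Example (illustrative sketch, not part of the original header): command
 * submission resolves a userspace (hw_ip, instance, ring) triple to a
 * kernel ring through the mapper. AMDGPU_HW_IP_GFX comes from amdgpu_drm.h,
 * and the queue_mgr lives in the context (see struct amdgpu_ctx below).
 *
 *	struct amdgpu_ring *out_ring;
 *	int r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
 *				     AMDGPU_HW_IP_GFX, 0, 0, &out_ring);
 */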

/*
 * context related structures
 */
struct amdgpu_ctx_ring {
	uint64_t		sequence;
	struct dma_fence	**fences;
	struct amd_sched_entity	entity;
};

struct amdgpu_ctx {
	struct kref		refcount;
	struct amdgpu_device	*adev;
	struct amdgpu_queue_mgr	queue_mgr;
	unsigned		reset_counter;
	spinlock_t		ring_lock;
	struct dma_fence	**fences;
	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
	bool			preamble_presented;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
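
/*
 * Example (illustrative sketch, not part of the original header): fences are
 * published per context and ring under a sequence number that can later be
 * looked up again, e.g. by the wait ioctls. Roughly:
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, handle);
 *	uint64_t seq = amdgpu_ctx_add_fence(ctx, ring, fence);
 *	// ... later ...
 *	struct dma_fence *f = amdgpu_ctx_get_fence(ctx, ring, seq);
 *	amdgpu_ctx_put(ctx);
 */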

/*
 * file private structure
 */
struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
	u32			vram_lost_counter;
};

/*
 * residency list
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	uint32_t			priority;
	struct page			**user_pages;
	int				user_invalidated;
};

struct amdgpu_bo_list {
	struct mutex lock;
	struct rcu_head rhead;
	struct kref refcount;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32		*reg_list;
	u32			reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def *cs_data;
	u32			clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32			cp_table_size;
	/* safe mode for updating CG/PG state */
	bool			in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;
	/* for firmware data */
	u32			save_and_restore_offset;
	u32			clear_state_descriptor_offset;
	u32			avail_scratch_ram_locations;
	u32			reg_restore_list_size;
	u32			reg_list_format_start;
	u32			reg_list_format_separate_start;
	u32			starting_offsets_start;
	u32			reg_list_format_size_bytes;
	u32			reg_list_size_bytes;
	u32			*register_list_format;
	u32			*register_restore;
};

#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	struct amdgpu_bo	*mec_fw_obj;
	u64			mec_fw_gpu_addr;
	u32			num_mec;
	u32			num_pipe_per_mec;
	u32			num_queue_per_pipe;
	void			*mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

struct amdgpu_kiq {
	u64			eop_gpu_addr;
	struct amdgpu_bo	*eop_obj;
	struct mutex		ring_mutex;
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned	num_reg;
	uint32_t	reg_base;
	uint32_t	free_mask;
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE	4
#define AMDGPU_GFX_MAX_SH_PER_SE	2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;
	uint8_t num_rb_per_se;
};

struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;
	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
	/* gfx configuration features */
	uint32_t double_offchip_lds_buf;
};

struct amdgpu_cu_info {
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_mask;
	uint32_t ao_cu_bitmap[4][4];
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 instance);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd,
			       uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd,
				uint32_t wave, uint32_t thread,
				uint32_t start, uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd,
				uint32_t wave, uint32_t start,
				uint32_t size, uint32_t *dst);
};

struct amdgpu_ngg_buf {
	struct amdgpu_bo	*bo;
	uint64_t		gpu_addr;
	uint32_t		size;
	uint32_t		bo_size;
};

enum {
	NGG_PRIM = 0,
	NGG_POS,
	NGG_CNTL,
	NGG_PARAM,
	NGG_BUF_MAX
};

struct amdgpu_ngg {
	struct amdgpu_ngg_buf	buf[NGG_BUF_MAX];
	uint32_t		gds_reserve_addr;
	uint32_t		gds_reserve_size;
	bool			init;
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gfx_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_kiq		kiq;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;		/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw;	/* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;		/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw;	/* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw;	/* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw;	/* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t			gfx_current_status;
	/* ce ram size */
	unsigned			ce_ram_size;
	struct amdgpu_cu_info		cu_info;
	const struct amdgpu_gfx_funcs	*funcs;
	/* reset mask */
	uint32_t			grbm_soft_reset;
	uint32_t			srbm_soft_reset;
	bool				in_reset;
	/* s3/s4 mask */
	bool				in_suspend;
	/* NGG */
	struct amdgpu_ngg		ngg;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
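
/*
 * Example (illustrative sketch, not part of the original header): a bare IB
 * round trip without the job layer, roughly what a ring test does. The IB
 * memory comes from the sub-allocator behind amdgpu_ib_get(); "timeout" is
 * an assumed jiffies value from the caller.
 *
 *	struct amdgpu_ib ib;
 *	struct dma_fence *f = NULL;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, &ib);
 *	if (r)
 *		return r;
 *	// ... write packets into ib.ptr[] and set ib.length_dw ...
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	if (!r && dma_fence_wait_timeout(f, false, timeout) <= 0)
 *		r = -ETIMEDOUT;
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */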

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	void		*kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_mn		*mn;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct dma_fence		*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved_vis_threshold;
	uint64_t			bytes_moved;
	uint64_t			bytes_moved_vis;
	struct amdgpu_bo_list_entry	*evictable;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;

	unsigned num_post_dep_syncobjs;
	struct drm_syncobj **post_dep_syncobjs;
};

#define AMDGPU_PREAMBLE_IB_PRESENT	(1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST	(1 << 1) /* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_HAVE_CTX_SWITCH	(1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
	struct amd_sched_job	base;
	struct amdgpu_device	*adev;
	struct amdgpu_vm	*vm;
	struct amdgpu_ring	*ring;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	dep_sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
	struct dma_fence	*fence; /* the hw fence */
	uint32_t		preamble_status;
	uint32_t		num_ibs;
	void			*owner;
	uint64_t		fence_ctx; /* the fence_context this job uses */
	bool			vm_needs_flush;
	unsigned		vm_id;
	uint64_t		vm_pd_addr;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;

	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;
};

#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}
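
/*
 * Example (illustrative sketch, not part of the original header): CS parsing
 * patches individual dwords of an IB through the accessors above, e.g. to
 * rewrite a buffer address after relocation. lower_32_bits() is the standard
 * kernel helper; "new_addr" is an assumed value from the caller.
 *
 *	u32 lo = amdgpu_get_ib_value(p, ib_idx, idx);
 *	amdgpu_set_ib_value(p, ib_idx, idx, lower_32_bits(new_addr));
 */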

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);

void amdgpu_get_pcie_info(struct amdgpu_device *adev);
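
/*
 * Example (illustrative sketch, not part of the original header): a ring
 * that needs a writeback slot allocates an index, derives the CPU and GPU
 * views of that dword from the amdgpu_wb fields, and frees it on teardown.
 *
 *	u32 index;
 *	if (!amdgpu_wb_get(adev, &index)) {
 *		volatile uint32_t *cpu = &adev->wb.wb[index];
 *		uint64_t gpu = adev->wb.gpu_addr + index * 4;
 *		// ... hand gpu to the engine, poll *cpu from the CPU ...
 *		amdgpu_wb_free(adev, index);
 *	}
 */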

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	bool			burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src	trap_irq_1;
#endif
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	int			num_instances;
	uint32_t		srbm_soft_reset;
};

/*
 * Firmware
 */
enum amdgpu_firmware_load_type {
	AMDGPU_FW_LOAD_DIRECT = 0,
	AMDGPU_FW_LOAD_SMU,
	AMDGPU_FW_LOAD_PSP,
};

struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	enum amdgpu_firmware_load_type load_type;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
	unsigned int max_ucodes;
	/* from vega10 onwards, firmware is loaded by psp instead of smu */
	const struct amdgpu_psp_funcs *funcs;
	struct amdgpu_bo *rbuf;
	struct mutex mutex;

	/* gpu info firmware data pointer */
	const struct firmware *gpu_info_fw;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);

/*
 * Debugfs
 */
struct amdgpu_debugfs {
	const struct drm_info_list	*files;
	unsigned			num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
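
/*
 * Example (illustrative sketch, not part of the original header): a
 * component registers its debugfs entries as a drm_info_list table. The
 * "my_component_info" show callback name is a placeholder.
 *
 *	static const struct drm_info_list my_component_list[] = {
 *		{ "my_component_info", my_component_info, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, my_component_list,
 *				     ARRAY_SIZE(my_component_list));
 */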

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
  1201. typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
  1202. typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
  1203. typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
  1204. typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
  1205. #define AMDGPU_RESET_MAGIC_NUM 64
  1206. struct amdgpu_device {
  1207. struct device *dev;
  1208. struct drm_device *ddev;
  1209. struct pci_dev *pdev;
  1210. #ifdef CONFIG_DRM_AMD_ACP
  1211. struct amdgpu_acp acp;
  1212. #endif
  1213. /* ASIC */
  1214. enum amd_asic_type asic_type;
  1215. uint32_t family;
  1216. uint32_t rev_id;
  1217. uint32_t external_rev_id;
  1218. unsigned long flags;
  1219. int usec_timeout;
  1220. const struct amdgpu_asic_funcs *asic_funcs;
  1221. bool shutdown;
  1222. bool need_dma32;
  1223. bool accel_working;
  1224. struct work_struct reset_work;
  1225. struct notifier_block acpi_nb;
  1226. struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
  1227. struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
  1228. unsigned debugfs_count;
  1229. #if defined(CONFIG_DEBUG_FS)
  1230. struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
  1231. #endif
  1232. struct amdgpu_atif atif;
  1233. struct amdgpu_atcs atcs;
  1234. struct mutex srbm_mutex;
  1235. /* GRBM index mutex. Protects concurrent access to GRBM index */
  1236. struct mutex grbm_idx_mutex;
  1237. struct dev_pm_domain vga_pm_domain;
  1238. bool have_disp_power_ref;
  1239. /* BIOS */
  1240. bool is_atom_fw;
  1241. uint8_t *bios;
  1242. uint32_t bios_size;
  1243. struct amdgpu_bo *stolen_vga_memory;
  1244. uint32_t bios_scratch_reg_offset;
  1245. uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
  1246. /* Register/doorbell mmio */
  1247. resource_size_t rmmio_base;
  1248. resource_size_t rmmio_size;
  1249. void __iomem *rmmio;
  1250. /* protects concurrent MM_INDEX/DATA based register access */
  1251. spinlock_t mmio_idx_lock;
  1252. /* protects concurrent SMC based register access */
  1253. spinlock_t smc_idx_lock;
  1254. amdgpu_rreg_t smc_rreg;
  1255. amdgpu_wreg_t smc_wreg;
  1256. /* protects concurrent PCIE register access */
  1257. spinlock_t pcie_idx_lock;
  1258. amdgpu_rreg_t pcie_rreg;
  1259. amdgpu_wreg_t pcie_wreg;
  1260. amdgpu_rreg_t pciep_rreg;
  1261. amdgpu_wreg_t pciep_wreg;
  1262. /* protects concurrent UVD register access */
  1263. spinlock_t uvd_ctx_idx_lock;
  1264. amdgpu_rreg_t uvd_ctx_rreg;
  1265. amdgpu_wreg_t uvd_ctx_wreg;
  1266. /* protects concurrent DIDT register access */
  1267. spinlock_t didt_idx_lock;
  1268. amdgpu_rreg_t didt_rreg;
  1269. amdgpu_wreg_t didt_wreg;
  1270. /* protects concurrent gc_cac register access */
  1271. spinlock_t gc_cac_idx_lock;
  1272. amdgpu_rreg_t gc_cac_rreg;
  1273. amdgpu_wreg_t gc_cac_wreg;
  1274. /* protects concurrent se_cac register access */
  1275. spinlock_t se_cac_idx_lock;
  1276. amdgpu_rreg_t se_cac_rreg;
  1277. amdgpu_wreg_t se_cac_wreg;
  1278. /* protects concurrent ENDPOINT (audio) register access */
  1279. spinlock_t audio_endpt_idx_lock;
  1280. amdgpu_block_rreg_t audio_endpt_rreg;
  1281. amdgpu_block_wreg_t audio_endpt_wreg;
  1282. void __iomem *rio_mem;
  1283. resource_size_t rio_mem_size;
  1284. struct amdgpu_doorbell doorbell;
  1285. /* clock/pll info */
  1286. struct amdgpu_clock clock;
  1287. /* MC */
  1288. struct amdgpu_mc mc;
  1289. struct amdgpu_gart gart;
  1290. struct amdgpu_dummy_page dummy_page;
  1291. struct amdgpu_vm_manager vm_manager;
  1292. struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
  1293. /* memory management */
  1294. struct amdgpu_mman mman;
  1295. struct amdgpu_vram_scratch vram_scratch;
  1296. struct amdgpu_wb wb;
  1297. atomic64_t num_bytes_moved;
  1298. atomic64_t num_evictions;
  1299. atomic64_t num_vram_cpu_page_faults;
  1300. atomic_t gpu_reset_counter;
  1301. atomic_t vram_lost_counter;
  1302. /* data for buffer migration throttling */
  1303. struct {
  1304. spinlock_t lock;
  1305. s64 last_update_us;
  1306. s64 accum_us; /* accumulated microseconds */
  1307. s64 accum_us_vis; /* for visible VRAM */
  1308. u32 log2_max_MBps;
  1309. } mm_stats;
  1310. /* display */
  1311. bool enable_virtual_display;
  1312. struct amdgpu_mode_info mode_info;
  1313. struct work_struct hotplug_work;
  1314. struct amdgpu_irq_src crtc_irq;
  1315. struct amdgpu_irq_src pageflip_irq;
  1316. struct amdgpu_irq_src hpd_irq;
  1317. /* rings */
  1318. u64 fence_context;
  1319. unsigned num_rings;
  1320. struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
  1321. bool ib_pool_ready;
  1322. struct amdgpu_sa_manager ring_tmp_bo;
  1323. /* interrupts */
  1324. struct amdgpu_irq irq;
  1325. /* powerplay */
  1326. struct amd_powerplay powerplay;
  1327. bool pp_enabled;
  1328. bool pp_force_state_enabled;
  1329. /* dpm */
  1330. struct amdgpu_pm pm;
  1331. u32 cg_flags;
  1332. u32 pg_flags;
  1333. /* amdgpu smumgr */
  1334. struct amdgpu_smumgr smu;
  1335. /* gfx */
  1336. struct amdgpu_gfx gfx;
  1337. /* sdma */
  1338. struct amdgpu_sdma sdma;
  1339. union {
  1340. struct {
  1341. /* uvd */
  1342. struct amdgpu_uvd uvd;
  1343. /* vce */
  1344. struct amdgpu_vce vce;
  1345. };
  1346. /* vcn */
  1347. struct amdgpu_vcn vcn;
  1348. };
        /* firmwares */
        struct amdgpu_firmware firmware;

        /* PSP */
        struct psp_context psp;

        /* GDS */
        struct amdgpu_gds gds;

        struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
        int num_ip_blocks;
        struct mutex mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);

        /* tracking pinned memory */
        u64 vram_pin_size;
        u64 invisible_pin_size;
        u64 gart_pin_size;

        /* amdkfd interface */
        struct kfd_dev *kfd;

        /* delayed work_func for deferring clockgating during resume */
        struct delayed_work late_init_work;

        struct amdgpu_virt virt;

        /* link all shadow bo */
        struct list_head shadow_list;
        struct mutex shadow_list_lock;
        /* link all gtt */
        spinlock_t gtt_list_lock;
        struct list_head gtt_list;
        /* keep an lru list of rings by HW IP */
        struct list_head ring_lru_list;
        spinlock_t ring_lru_list_lock;

        /* record whether a hw reset has been performed */
        bool has_hw_reset;
        u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];

        /* record the last mm index written through WREG32 */
        unsigned long last_mm_index;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
        return container_of(bdev, struct amdgpu_device, mman.bdev);
}
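
/*
 * Illustrative only (not part of the driver): amdgpu_ttm_adev() is the
 * standard container_of() pattern -- given a pointer to the embedded
 * ttm_bo_device, it recovers the enclosing amdgpu_device.  A minimal
 * sketch of how a TTM callback might use it; the callback name and body
 * below are hypothetical:
 *
 *      static void example_ttm_callback(struct ttm_bo_device *bdev)
 *      {
 *              struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 *
 *              atomic64_inc(&adev->num_evictions);
 *      }
 */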

int amdgpu_device_init(struct amdgpu_device *adev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    uint32_t acc_flags);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
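
/*
 * Doorbells live in a dedicated MMIO aperture: writing a ring's assigned
 * doorbell slot tells the hardware that the ring's write pointer moved,
 * without a trip through the register bus.  Illustrative only -- a wptr
 * update via the WDOORBELL32() convenience macro defined below typically
 * looks like:
 *
 *      WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */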

/*
 * Register read & write helper macros.
 */
#define AMDGPU_REGS_IDX    (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
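
/*
 * The RREG32_*()/WREG32_*() wrappers above dispatch through per-ASIC
 * callbacks on amdgpu_device because these register spaces are reached
 * indirectly -- through an index/data register pair guarded by the
 * matching *_idx_lock spinlock -- rather than by a plain MMIO access.
 * A minimal sketch of what such a callback typically looks like; the
 * index/data register names below are hypothetical placeholders:
 *
 *      static u32 example_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *      {
 *              unsigned long flags;
 *              u32 r;
 *
 *              spin_lock_irqsave(&adev->pcie_idx_lock, flags);
 *              WREG32(mmPCIE_INDEX, reg);      (hypothetical index reg)
 *              (void)RREG32(mmPCIE_INDEX);     (post the write)
 *              r = RREG32(mmPCIE_DATA);        (hypothetical data reg)
 *              spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 *              return r;
 *      }
 */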

#define WREG32_P(reg, val, mask)                                \
        do {                                                    \
                uint32_t tmp_ = RREG32(reg);                    \
                tmp_ &= (mask);                                 \
                tmp_ |= ((val) & ~(mask));                      \
                WREG32(reg, tmp_);                              \
        } while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
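
/*
 * Note the mask convention: in WREG32_P() the mask names the bits to
 * *preserve* from the current register value, and (val & ~mask) supplies
 * the new bits.  So WREG32_AND(reg, and) clears every bit outside 'and',
 * and WREG32_OR(reg, or) sets the bits in 'or' while preserving the rest.
 * Worked example with hypothetical values: if the register currently
 * reads 0xF0, then
 *
 *      WREG32_OR(reg, 0x0F)   writes (0xF0 & ~0x0F) | (0x0F & 0x0F) = 0xFF
 *      WREG32_AND(reg, 0x30)  writes (0xF0 & 0x30) | (0 & ~0x30)    = 0x30
 */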
#define WREG32_PLL_P(reg, val, mask)                            \
        do {                                                    \
                uint32_t tmp_ = RREG32_PLL(reg);                \
                tmp_ &= (mask);                                 \
                tmp_ |= ((val) & ~(mask));                      \
                WREG32_PLL(reg, tmp_);                          \
        } while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), 0))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)                  \
        (((orig_val) & ~REG_FIELD_MASK(reg, field)) |                   \
         (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)                                \
        (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)   \
        WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)    \
        WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
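
/*
 * The REG_*_FIELD() helpers lean on the generated register headers, which
 * define <REG>__<FIELD>__SHIFT and <REG>__<FIELD>_MASK for every field.
 * Illustrative read-modify-write of a single field (the register and
 * field names here are taken from the generated SDMA headers; any such
 * pair works the same way):
 *
 *      u32 tmp = RREG32(mmSDMA0_F32_CNTL);
 *      tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
 *      WREG32(mmSDMA0_F32_CNTL, tmp);
 *
 * or, equivalently, in a single call:
 *
 *      WREG32_FIELD(SDMA0_F32_CNTL, HALT, 1);
 */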

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
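
/*
 * The RBIOS*() helpers assemble multi-byte values from the cached VBIOS
 * image one byte at a time, so they read the little-endian ROM layout
 * correctly regardless of host endianness.  For example, if the image
 * holds the bytes 0x78 0x56 0x34 0x12 starting at offset i, then
 * RBIOS16(i) == 0x5678 and RBIOS32(i) == 0x12345678.
 */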

static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++)
                if (&adev->sdma.instance[i].ring == ring)
                        break;

        if (i < adev->sdma.num_instances)
                return &adev->sdma.instance[i];
        else
                return NULL;
}
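
/*
 * Illustrative only: an SDMA callback can map its ring back to the owning
 * instance, e.g. to reach per-instance state such as the firmware version.
 * The function below is a hypothetical sketch:
 *
 *      static u32 example_sdma_fw_version(struct amdgpu_ring *ring)
 *      {
 *              struct amdgpu_sdma_instance *sdma =
 *                      amdgpu_get_sdma_instance(ring);
 *
 *              return sdma ? sdma->fw_version : 0;
 *      }
 */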

/*
 * ASIC macros.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev), (flags))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
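
/*
 * Every wrapper above follows the same pattern: at init time each IP
 * block or component installs a function table (asic_funcs, gart_funcs,
 * the ring and IH funcs, ...), and the macro forwards to whatever
 * implementation the detected ASIC registered.  For example,
 *
 *      u32 xclk = amdgpu_asic_get_xclk(adev);
 *
 * expands to (adev)->asic_funcs->get_xclk((adev)), so common code never
 * needs to know which hardware generation it is running on.
 */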

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
                                      const u32 *registers,
                                      const u32 array_size);
bool amdgpu_device_is_px(struct drm_device *dev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
                          struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
                                 struct drm_file *file_priv);
int amdgpu_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
                             unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
        u32 clock;

        int n_32khz;
        int cts_32khz;

        int n_44_1khz;
        int cts_44_1khz;

        int n_48khz;
        int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
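
/*
 * Background for the table above: HDMI audio clock regeneration (ACR)
 * lets the sink rebuild the audio sample clock from the TMDS clock via
 *
 *      128 * audio_sample_rate = tmds_clock * N / CTS
 *
 * so for each supported sample rate (32, 44.1 and 48 kHz) the driver
 * keeps the N/CTS pair matching the current pixel (TMDS) clock.  For
 * example, at a 74.25 MHz TMDS clock with 48 kHz audio, the HDMI spec's
 * recommended values are N = 6144 and
 * CTS = 74250000 * 6144 / (128 * 48000) = 74250.
 */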

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
                                         u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **mapping);

#include "amdgpu_object.h"
#endif