/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"
/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
        AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
        AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
        AMDGPU_SDMA_IRQ_TRAP0 = 0,
        AMDGPU_SDMA_IRQ_TRAP1,
        AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
        AMDGPU_THERMAL_IRQ_LAST
};
int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
                                 enum amd_ip_block_type block_type,
                                 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
                                 enum amd_ip_block_type block_type,
                                 enum amd_powergating_state state);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
                         enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
                    enum amd_ip_block_type block_type);

struct amdgpu_ip_block_version {
        enum amd_ip_block_type type;
        u32 major;
        u32 minor;
        u32 rev;
        const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
                                enum amd_ip_block_type type,
                                u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
        struct amdgpu_device *adev,
        enum amd_ip_block_type type);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
        /* maximum bytes in a single operation */
        uint32_t copy_max_bytes;
        /* number of dw to reserve per operation */
        unsigned copy_num_dw;
        /* used for buffer migration */
        void (*emit_copy_buffer)(struct amdgpu_ib *ib,
                                 /* src addr in bytes */
                                 uint64_t src_offset,
                                 /* dst addr in bytes */
                                 uint64_t dst_offset,
                                 /* number of bytes to transfer */
                                 uint32_t byte_count);
        /* maximum bytes in a single operation */
        uint32_t fill_max_bytes;
        /* number of dw to reserve per operation */
        unsigned fill_num_dw;
        /* used for buffer clearing */
        void (*emit_fill_buffer)(struct amdgpu_ib *ib,
                                 /* value to write to memory */
                                 uint32_t src_data,
                                 /* dst addr in bytes */
                                 uint64_t dst_offset,
                                 /* number of bytes to fill */
                                 uint32_t byte_count);
};
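/*
 * Illustration only (not part of the original header, hence compiled out):
 * a minimal sketch of how a caller might drive emit_copy_buffer, splitting
 * a request into copy_max_bytes sized chunks. The helper name and the
 * assumption that the funcs come from adev->mman.buffer_funcs are ours.
 */
#if 0
static void example_emit_copies(struct amdgpu_device *adev,
                                struct amdgpu_ib *ib,
                                uint64_t src, uint64_t dst, uint64_t size)
{
        const struct amdgpu_buffer_funcs *funcs = adev->mman.buffer_funcs;

        while (size) {
                /* clamp each copy to what the engine accepts at once */
                uint32_t bytes = min_t(uint64_t, size, funcs->copy_max_bytes);

                funcs->emit_copy_buffer(ib, src, dst, bytes);
                src += bytes;
                dst += bytes;
                size -= bytes;
        }
}
#endif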
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
        /* copy pte entries from GART */
        void (*copy_pte)(struct amdgpu_ib *ib,
                         uint64_t pe, uint64_t src,
                         unsigned count);
        /* write pte one entry at a time with addr mapping */
        void (*write_pte)(struct amdgpu_ib *ib,
                          const dma_addr_t *pages_addr, uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags);
        /* for linear pte/pde updates without addr mapping */
        void (*set_pte_pde)(struct amdgpu_ib *ib,
                            uint64_t pe,
                            uint64_t addr, unsigned count,
                            uint32_t incr, uint32_t flags);
};
/* provided by the gmc block */
struct amdgpu_gart_funcs {
        /* flush the vm tlb via mmio */
        void (*flush_gpu_tlb)(struct amdgpu_device *adev,
                              uint32_t vmid);
        /* write pte/pde updates using the cpu */
        int (*set_pte_pde)(struct amdgpu_device *adev,
                           void *cpu_pt_addr, /* cpu addr of page table */
                           uint32_t gpu_page_idx, /* pte/pde to update */
                           uint64_t addr, /* addr to write into pte/pde */
                           uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
        /* ring read/write ptr handling, called from interrupt context */
        u32 (*get_wptr)(struct amdgpu_device *adev);
        void (*decode_iv)(struct amdgpu_device *adev,
                          struct amdgpu_iv_entry *entry);
        void (*set_rptr)(struct amdgpu_device *adev);
};
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
        /* ring read/write ptr handling */
        u32 (*get_rptr)(struct amdgpu_ring *ring);
        u32 (*get_wptr)(struct amdgpu_ring *ring);
        void (*set_wptr)(struct amdgpu_ring *ring);
        /* validating and patching of IBs */
        int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
        /* command emit functions */
        void (*emit_ib)(struct amdgpu_ring *ring,
                        struct amdgpu_ib *ib,
                        unsigned vm_id, bool ctx_switch);
        void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
                           uint64_t seq, unsigned flags);
        void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
        void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
                              uint64_t pd_addr);
        void (*emit_hdp_flush)(struct amdgpu_ring *ring);
        void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
        void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
                                uint32_t gds_base, uint32_t gds_size,
                                uint32_t gws_base, uint32_t gws_size,
                                uint32_t oa_base, uint32_t oa_size);
        /* testing functions */
        int (*test_ring)(struct amdgpu_ring *ring);
        int (*test_ib)(struct amdgpu_ring *ring, long timeout);
        /* insert NOP packets */
        void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
        /* pad the indirect buffer to the necessary number of dw */
        void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
        unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
        void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
        /* note usage for clock and power gating */
        void (*begin_use)(struct amdgpu_ring *ring);
        void (*end_use)(struct amdgpu_ring *ring);
};
/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
        struct page *page;
        dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);

/*
 * Clocks
 */
#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
        struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
        struct amdgpu_pll spll;
        struct amdgpu_pll mpll;
        /* 10 kHz units */
        uint32_t default_mclk;
        uint32_t default_sclk;
        uint32_t default_dispclk;
        uint32_t current_dispclk;
        uint32_t dp_extclk;
        uint32_t max_pixel_clock;
};
/*
 * Fences.
 */
struct amdgpu_fence_driver {
        uint64_t gpu_addr;
        volatile uint32_t *cpu_addr;
        /* sync_seq is protected by ring emission lock */
        uint32_t sync_seq;
        atomic_t last_seq;
        bool initialized;
        struct amdgpu_irq_src *irq_src;
        unsigned irq_type;
        struct timer_list fallback_timer;
        unsigned num_fences_mask;
        spinlock_t lock;
        struct fence **fences;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
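/*
 * Sketch of the typical producer/consumer flow for these helpers
 * (illustration only, assuming an initialized ring with its fence
 * interrupt set up):
 *
 *      struct fence *f;
 *
 *      r = amdgpu_fence_emit(ring, &f);  // fence for work emitted so far
 *      ...
 *      fence_wait(f, false);             // wait outside the emission lock
 *      fence_put(f);
 */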
/*
 * TTM.
 */

#define AMDGPU_TTM_LRU_SIZE 20

struct amdgpu_mman_lru {
        struct list_head *lru[TTM_NUM_MEM_TYPES];
        struct list_head *swap_lru;
};

struct amdgpu_mman {
        struct ttm_bo_global_ref bo_global_ref;
        struct drm_global_reference mem_global_ref;
        struct ttm_bo_device bdev;
        bool mem_global_referenced;
        bool initialized;

#if defined(CONFIG_DEBUG_FS)
        struct dentry *vram;
        struct dentry *gtt;
#endif

        /* buffer handling */
        const struct amdgpu_buffer_funcs *buffer_funcs;
        struct amdgpu_ring *buffer_funcs_ring;
        /* Scheduler entity for buffer moves */
        struct amd_sched_entity entity;

        /* custom LRU management */
        struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                       uint64_t src_offset,
                       uint64_t dst_offset,
                       uint32_t byte_count,
                       struct reservation_object *resv,
                       struct fence **fence);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                       uint32_t src_data,
                       struct reservation_object *resv,
                       struct fence **fence);

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
struct amdgpu_bo_list_entry {
        struct amdgpu_bo *robj;
        struct ttm_validate_buffer tv;
        struct amdgpu_bo_va *bo_va;
        uint32_t priority;
        struct page **user_pages;
        int user_invalidated;
};

struct amdgpu_bo_va_mapping {
        struct list_head list;
        struct interval_tree_node it;
        uint64_t offset;
        uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
        /* protected by bo being reserved */
        struct list_head bo_list;
        struct fence *last_pt_update;
        unsigned ref_count;

        /* protected by vm mutex and spinlock */
        struct list_head vm_status;

        /* mappings for this bo_va */
        struct list_head invalids;
        struct list_head valids;

        /* constant after initialization */
        struct amdgpu_vm *vm;
        struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
        /* Protected by gem.mutex */
        struct list_head list;
        /* Protected by tbo.reserved */
        u32 prefered_domains;
        u32 allowed_domains;
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        struct ttm_placement placement;
        struct ttm_buffer_object tbo;
        struct ttm_bo_kmap_obj kmap;
        u64 flags;
        unsigned pin_count;
        void *kptr;
        u64 tiling_flags;
        u64 metadata_flags;
        void *metadata;
        u32 metadata_size;
        /* list of all virtual addresses this bo is associated with */
        struct list_head va;
        /* Constant after initialization */
        struct amdgpu_device *adev;
        struct drm_gem_object gem_base;
        struct amdgpu_bo *parent;
        struct ttm_bo_kmap_obj dma_buf_vmap;
        struct amdgpu_mn *mn;
        struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
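/* e.g. in a GEM callback: struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 * recovers the amdgpu_bo that embeds the drm_gem_object */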
void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
                           struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore code, which both have
 * their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry
 * has the highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we wait on each sub
 * object in turn until object_offset + object_size >= alloc_size;
 * that object's range then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
        wait_queue_head_t wq;
        struct amdgpu_bo *bo;
        struct list_head *hole;
        struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
        struct list_head olist;
        unsigned size;
        uint64_t gpu_addr;
        void *cpu_ptr;
        uint32_t domain;
        uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
        struct list_head olist;
        struct list_head flist;
        struct amdgpu_sa_manager *manager;
        unsigned soffset;
        unsigned eoffset;
        struct fence *fence;
};
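/*
 * Worked example of the end-of-buffer check described above (numbers
 * illustrative): with manager->size == 4096 and a last sub-object
 * spanning soffset 0 to eoffset 1024, a 512 byte request fits at the
 * end because 4096 - 1024 >= 512, so the new amdgpu_sa_bo is placed
 * at soffset 1024, eoffset 1536.
 */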
/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, bool kernel,
                             struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p);

/*
 * Synchronization
 */
struct amdgpu_sync {
        DECLARE_HASHTABLE(fences, 4);
        struct fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner);
struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
                                     struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
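/*
 * The usual lifecycle of an amdgpu_sync object (sketch only, assuming
 * a valid fence f, reservation object resv and owner token):
 *
 *      struct amdgpu_sync sync;
 *
 *      amdgpu_sync_create(&sync);
 *      amdgpu_sync_fence(adev, &sync, f);
 *      amdgpu_sync_resv(adev, &sync, resv, owner);
 *      ...
 *      amdgpu_sync_free(&sync);
 */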
/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
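/* e.g. AMDGPU_GPU_PAGE_ALIGN(4097) == 8192: the add-and-mask rounds any
 * address up to the next 4 KB GPU page boundary */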
struct amdgpu_gart {
        dma_addr_t table_addr;
        struct amdgpu_bo *robj;
        void *ptr;
        unsigned num_gpu_pages;
        unsigned num_cpu_pages;
        unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
        struct page **pages;
#endif
        bool ready;
        const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
                        int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint32_t flags);
/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
        resource_size_t aper_size;
        resource_size_t aper_base;
        resource_size_t agp_base;
        /* for some chips with <= 32MB we need to lie
         * about vram size near mc fb location */
        u64 mc_vram_size;
        u64 visible_vram_size;
        u64 gtt_size;
        u64 gtt_start;
        u64 gtt_end;
        u64 vram_start;
        u64 vram_end;
        unsigned vram_width;
        u64 real_vram_size;
        int vram_mtrr;
        u64 gtt_base_align;
        u64 mc_mask;
        const struct firmware *fw; /* MC firmware */
        uint32_t fw_version;
        struct amdgpu_irq_src vm_fault;
        uint32_t vram_type;
        uint32_t srbm_soft_reset;
        struct amdgpu_mode_mc_save save;
};
/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT {
        AMDGPU_DOORBELL_KIQ = 0x000,
        AMDGPU_DOORBELL_HIQ = 0x001,
        AMDGPU_DOORBELL_DIQ = 0x002,
        AMDGPU_DOORBELL_MEC_RING0 = 0x010,
        AMDGPU_DOORBELL_MEC_RING1 = 0x011,
        AMDGPU_DOORBELL_MEC_RING2 = 0x012,
        AMDGPU_DOORBELL_MEC_RING3 = 0x013,
        AMDGPU_DOORBELL_MEC_RING4 = 0x014,
        AMDGPU_DOORBELL_MEC_RING5 = 0x015,
        AMDGPU_DOORBELL_MEC_RING6 = 0x016,
        AMDGPU_DOORBELL_MEC_RING7 = 0x017,
        AMDGPU_DOORBELL_GFX_RING0 = 0x020,
        AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
        AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
        AMDGPU_DOORBELL_IH = 0x1E8,
        AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
        AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
        /* doorbell mmio */
        resource_size_t base;
        resource_size_t size;
        u32 __iomem *ptr;
        u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
                                  phys_addr_t *aperture_base,
                                  size_t *aperture_size,
                                  size_t *start_offset);
/*
 * IRQS.
 */
struct amdgpu_flip_work {
        struct delayed_work flip_work;
        struct work_struct unpin_work;
        struct amdgpu_device *adev;
        int crtc_id;
        u32 target_vblank;
        uint64_t base;
        struct drm_pending_vblank_event *event;
        struct amdgpu_bo *old_rbo;
        struct fence *excl;
        unsigned shared_count;
        struct fence **shared;
        struct fence_cb cb;
        bool async;
};
/*
 * CP & rings.
 */
struct amdgpu_ib {
        struct amdgpu_sa_bo *sa_bo;
        uint32_t length_dw;
        uint64_t gpu_addr;
        uint32_t *ptr;
        uint32_t flags;
};

enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX,
        AMDGPU_RING_TYPE_COMPUTE,
        AMDGPU_RING_TYPE_SDMA,
        AMDGPU_RING_TYPE_UVD,
        AMDGPU_RING_TYPE_VCE
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
struct amdgpu_ring {
        struct amdgpu_device *adev;
        const struct amdgpu_ring_funcs *funcs;
        struct amdgpu_fence_driver fence_drv;
        struct amd_gpu_scheduler sched;

        struct amdgpu_bo *ring_obj;
        volatile uint32_t *ring;
        unsigned rptr_offs;
        unsigned wptr;
        unsigned wptr_old;
        unsigned ring_size;
        unsigned max_dw;
        int count_dw;
        uint64_t gpu_addr;
        uint32_t align_mask;
        uint32_t ptr_mask;
        bool ready;
        u32 nop;
        u32 idx;
        u32 me;
        u32 pipe;
        u32 queue;
        struct amdgpu_bo *mqd_obj;
        u32 doorbell_index;
        bool use_doorbell;
        unsigned wptr_offs;
        unsigned fence_offs;
        uint64_t current_ctx;
        enum amdgpu_ring_type type;
        char name[16];
        unsigned cond_exe_offs;
        u64 cond_exe_gpu_addr;
        volatile u32 *cond_exe_cpu_addr;
#if defined(CONFIG_DEBUG_FS)
        struct dentry *ent;
#endif
};
/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
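/* e.g. with amdgpu_vm_block_size == 9 (an illustrative value for the
 * module parameter) each page table holds 1 << 9 == 512 entries */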
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768

/* LOG2 number of contiguous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)

#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
#define AMDGPU_PTE_FRAG(x) (((x) & 0x1f) << 7)
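/* e.g. AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG) encodes a 16 page
 * (64 KB with 4 KB pages) fragment in bits 11:7 of the PTE */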
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
struct amdgpu_vm_pt {
        struct amdgpu_bo_list_entry entry;
        uint64_t addr;
};

struct amdgpu_vm {
        /* tree of virtual addresses mapped */
        struct rb_root va;

        /* protecting invalidated */
        spinlock_t status_lock;

        /* BOs moved, but not yet updated in the PT */
        struct list_head invalidated;

        /* BOs cleared in the PT because of a move */
        struct list_head cleared;

        /* BO mappings freed, but not yet updated in the PT */
        struct list_head freed;

        /* contains the page directory */
        struct amdgpu_bo *page_directory;
        unsigned max_pde_used;
        struct fence *page_directory_fence;
        uint64_t last_eviction_counter;

        /* array of page tables, one for each page directory entry */
        struct amdgpu_vm_pt *page_tables;

        /* for id and flush management per ring */
        struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];

        /* protecting freed */
        spinlock_t freed_lock;

        /* Scheduler entity for page table updates */
        struct amd_sched_entity entity;

        /* client id */
        u64 client_id;
};

struct amdgpu_vm_id {
        struct list_head list;
        struct fence *first;
        struct amdgpu_sync active;
        struct fence *last_flush;
        atomic64_t owner;

        uint64_t pd_gpu_addr;
        /* last flushed PD/PT update */
        struct fence *flushed_updates;

        uint32_t current_gpu_reset_count;

        uint32_t gds_base;
        uint32_t gds_size;
        uint32_t gws_base;
        uint32_t gws_size;
        uint32_t oa_base;
        uint32_t oa_size;
};
struct amdgpu_vm_manager {
        /* Handling of VMIDs */
        struct mutex lock;
        unsigned num_ids;
        struct list_head ids_lru;
        struct amdgpu_vm_id ids[AMDGPU_NUM_VM];

        /* Handling of VM fences */
        u64 fence_context;
        unsigned seqno[AMDGPU_MAX_RINGS];

        uint32_t max_pfn;
        /* vram base address for page table entry */
        u64 vram_base_offset;
        /* is vm enabled? */
        bool enabled;
        /* vm pte handling */
        const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
        struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
        unsigned vm_pte_num_rings;
        atomic_t vm_pte_next_ring;
        /* client id counter */
        atomic64_t client_counter;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
                      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                             struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                     struct amdgpu_bo_va *bo_va,
                     uint64_t addr, uint64_t offset,
                     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va,
                       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
/*
 * context related structures
 */
struct amdgpu_ctx_ring {
        uint64_t sequence;
        struct fence **fences;
        struct amd_sched_entity entity;
};

struct amdgpu_ctx {
        struct kref refcount;
        struct amdgpu_device *adev;
        unsigned reset_counter;
        spinlock_t ring_lock;
        struct fence **fences;
        struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
        struct amdgpu_device *adev;
        struct mutex lock;
        /* protected by lock */
        struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
/*
 * file private structure
 */
struct amdgpu_fpriv {
        struct amdgpu_vm vm;
        struct mutex bo_list_lock;
        struct idr bo_list_handles;
        struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list {
        struct mutex lock;
        struct amdgpu_bo *gds_obj;
        struct amdgpu_bo *gws_obj;
        struct amdgpu_bo *oa_obj;
        unsigned first_userptr;
        unsigned num_entries;
        struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
                             struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
        void (*enter_safe_mode)(struct amdgpu_device *adev);
        void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
        /* for power gating */
        struct amdgpu_bo *save_restore_obj;
        uint64_t save_restore_gpu_addr;
        volatile uint32_t *sr_ptr;
        const u32 *reg_list;
        u32 reg_list_size;
        /* for clear state */
        struct amdgpu_bo *clear_state_obj;
        uint64_t clear_state_gpu_addr;
        volatile uint32_t *cs_ptr;
        const struct cs_section_def *cs_data;
        u32 clear_state_size;
        /* for cp tables */
        struct amdgpu_bo *cp_table_obj;
        uint64_t cp_table_gpu_addr;
        volatile uint32_t *cp_table_ptr;
        u32 cp_table_size;

        /* safe mode for updating CG/PG state */
        bool in_safe_mode;
        const struct amdgpu_rlc_funcs *funcs;

        /* for firmware data */
        u32 save_and_restore_offset;
        u32 clear_state_descriptor_offset;
        u32 avail_scratch_ram_locations;
        u32 reg_restore_list_size;
        u32 reg_list_format_start;
        u32 reg_list_format_separate_start;
        u32 starting_offsets_start;
        u32 reg_list_format_size_bytes;
        u32 reg_list_size_bytes;

        u32 *register_list_format;
        u32 *register_restore;
};

struct amdgpu_mec {
        struct amdgpu_bo *hpd_eop_obj;
        u64 hpd_eop_gpu_addr;
        u32 num_pipe;
        u32 num_mec;
        u32 num_queue;
};
/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
        unsigned num_reg;
        uint32_t reg_base;
        bool free[32];
        uint32_t reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
        unsigned max_shader_engines;
        unsigned max_tile_pipes;
        unsigned max_cu_per_sh;
        unsigned max_sh_per_se;
        unsigned max_backends_per_se;
        unsigned max_texture_channel_caches;
        unsigned max_gprs;
        unsigned max_gs_threads;
        unsigned max_hw_contexts;
        unsigned sc_prim_fifo_size_frontend;
        unsigned sc_prim_fifo_size_backend;
        unsigned sc_hiz_tile_fifo_size;
        unsigned sc_earlyz_tile_fifo_size;
        unsigned num_tile_pipes;
        unsigned backend_enable_mask;
        unsigned mem_max_burst_length_bytes;
        unsigned mem_row_size_in_kb;
        unsigned shader_engine_tile_size;
        unsigned num_gpus;
        unsigned multi_gpu_tile_size;
        unsigned mc_arb_ramcfg;
        unsigned gb_addr_config;
        unsigned num_rbs;

        uint32_t tile_mode_array[32];
        uint32_t macrotile_mode_array[16];
};

struct amdgpu_cu_info {
        uint32_t number; /* total active CU number */
        uint32_t ao_cu_mask;
        uint32_t bitmap[4][4];
};
struct amdgpu_gfx_funcs {
        /* get the gpu clock counter */
        uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
        void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
                             u32 sh_num, u32 instance);
};

struct amdgpu_gfx {
        struct mutex gpu_clock_mutex;
        struct amdgpu_gca_config config;
        struct amdgpu_rlc rlc;
        struct amdgpu_mec mec;
        struct amdgpu_scratch scratch;
        const struct firmware *me_fw; /* ME firmware */
        uint32_t me_fw_version;
        const struct firmware *pfp_fw; /* PFP firmware */
        uint32_t pfp_fw_version;
        const struct firmware *ce_fw; /* CE firmware */
        uint32_t ce_fw_version;
        const struct firmware *rlc_fw; /* RLC firmware */
        uint32_t rlc_fw_version;
        const struct firmware *mec_fw; /* MEC firmware */
        uint32_t mec_fw_version;
        const struct firmware *mec2_fw; /* MEC2 firmware */
        uint32_t mec2_fw_version;
        uint32_t me_feature_version;
        uint32_t ce_feature_version;
        uint32_t pfp_feature_version;
        uint32_t rlc_feature_version;
        uint32_t mec_feature_version;
        uint32_t mec2_feature_version;
        struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
        unsigned num_gfx_rings;
        struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
        unsigned num_compute_rings;
        struct amdgpu_irq_src eop_irq;
        struct amdgpu_irq_src priv_reg_irq;
        struct amdgpu_irq_src priv_inst_irq;
        /* gfx status */
        uint32_t gfx_current_status;
        /* CE RAM size */
        unsigned ce_ram_size;
        struct amdgpu_cu_info cu_info;
        const struct amdgpu_gfx_funcs *funcs;

        /* reset mask */
        uint32_t grbm_soft_reset;
        uint32_t srbm_soft_reset;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ib, struct fence *last_vm_update,
                       struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, u32 nop, u32 align_mask,
                     struct amdgpu_irq_src *irq_src, unsigned irq_type,
                     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
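/*
 * Typical emission pattern for a ring (sketch only; error handling
 * elided): reserve space first, write the packets, then commit the
 * new write pointer to the hardware:
 *
 *      if (amdgpu_ring_alloc(ring, ndw) == 0) {
 *              amdgpu_ring_insert_nop(ring, ndw);  // stand-in for real packets
 *              amdgpu_ring_commit(ring);
 *      }
 */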
/*
 * CS.
 */
struct amdgpu_cs_chunk {
        uint32_t chunk_id;
        uint32_t length_dw;
        void *kdata;
};

struct amdgpu_cs_parser {
        struct amdgpu_device *adev;
        struct drm_file *filp;
        struct amdgpu_ctx *ctx;

        /* chunks */
        unsigned nchunks;
        struct amdgpu_cs_chunk *chunks;

        /* scheduler job object */
        struct amdgpu_job *job;

        /* buffer objects */
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_list *bo_list;
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head validated;
        struct fence *fence;
        uint64_t bytes_moved_threshold;
        uint64_t bytes_moved;

        /* user fence */
        struct amdgpu_bo_list_entry uf_entry;
};
struct amdgpu_job {
        struct amd_sched_job base;
        struct amdgpu_device *adev;
        struct amdgpu_vm *vm;
        struct amdgpu_ring *ring;
        struct amdgpu_sync sync;
        struct amdgpu_ib *ibs;
        struct fence *fence; /* the hw fence */
        uint32_t num_ibs;
        void *owner;
        uint64_t ctx;
        bool vm_needs_flush;
        unsigned vm_id;
        uint64_t vm_pd_addr;
        uint32_t gds_base, gds_size;
        uint32_t gws_base, gws_size;
        uint32_t oa_base, oa_size;

        /* user fence handling */
        uint64_t uf_addr;
        uint64_t uf_sequence;
};
#define to_amdgpu_job(sched_job) \
                container_of((sched_job), struct amdgpu_job, base)
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
                                      uint32_t ib_idx, int idx)
{
        return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
                                       uint32_t ib_idx, int idx,
                                       uint32_t value)
{
        p->job->ibs[ib_idx].ptr[idx] = value;
}
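/*
 * Illustration only: a parse_cs implementation might patch an IB dword
 * in place with these accessors (the flag below is made up):
 *
 *      u32 hdr = amdgpu_get_ib_value(p, ib_idx, idx);
 *      amdgpu_set_ib_value(p, ib_idx, idx, hdr | 0x1);
 */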
/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
        struct amdgpu_bo *wb_obj;
        volatile uint32_t *wb;
        uint64_t gpu_addr;
        u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
        unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
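/*
 * Sketch of the usual writeback slot usage (assumptions: the device
 * embeds this struct as adev->wb, and each slot is 32 bits wide):
 *
 *      u32 wb;
 *
 *      if (amdgpu_wb_get(adev, &wb) == 0) {
 *              uint64_t wb_gpu_addr = adev->wb.gpu_addr + wb * 4;
 *              ...
 *              amdgpu_wb_free(adev, wb);
 *      }
 */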
enum amdgpu_int_thermal_type {
        THERMAL_TYPE_NONE,
        THERMAL_TYPE_EXTERNAL,
        THERMAL_TYPE_EXTERNAL_GPIO,
        THERMAL_TYPE_RV6XX,
        THERMAL_TYPE_RV770,
        THERMAL_TYPE_ADT7473_WITH_INTERNAL,
        THERMAL_TYPE_EVERGREEN,
        THERMAL_TYPE_SUMO,
        THERMAL_TYPE_NI,
        THERMAL_TYPE_SI,
        THERMAL_TYPE_EMC2103_WITH_INTERNAL,
        THERMAL_TYPE_CI,
        THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
        AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
        AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
        AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
        AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
        AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
        AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
        AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
        AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, all cases */
        AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
        AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
        AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
        AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
        AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};
struct amdgpu_ps {
        u32 caps; /* vbios flags */
        u32 class; /* vbios flags */
        u32 class2; /* vbios flags */
        /* UVD clocks */
        u32 vclk;
        u32 dclk;
        /* VCE clocks */
        u32 evclk;
        u32 ecclk;
        bool vce_active;
        enum amdgpu_vce_level vce_level;
        /* asic priv */
        void *ps_priv;
};

struct amdgpu_dpm_thermal {
        /* thermal interrupt work */
        struct work_struct work;
        /* low temperature threshold */
        int min_temp;
        /* high temperature threshold */
        int max_temp;
        /* was last interrupt low to high or high to low */
        bool high_to_low;
        /* interrupt source */
        struct amdgpu_irq_src irq;
};

enum amdgpu_clk_action {
        AMDGPU_SCLK_UP = 1,
        AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks {
        u32 sclk;
        u32 mclk;
        enum amdgpu_clk_action action;
};
struct amdgpu_clock_and_voltage_limits {
        u32 sclk;
        u32 mclk;
        u16 vddc;
        u16 vddci;
};

struct amdgpu_clock_array {
        u32 count;
        u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
        u32 clk;
        u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
        u32 count;
        struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
        struct {
                u16 vddc;
                u32 leakage;
        };
        struct {
                u16 vddc1;
                u16 vddc2;
                u16 vddc3;
        };
};

struct amdgpu_cac_leakage_table {
        u32 count;
        union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
        u16 voltage;
        u32 sclk;
        u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
        u32 count;
        struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
        u32 vclk;
        u32 dclk;
        u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
        u8 count;
        struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
        u32 ecclk;
        u32 evclk;
        u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
        u8 count;
        struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
        u8 ppm_design;
        u16 cpu_core_number;
        u32 platform_tdp;
        u32 small_ac_platform_tdp;
        u32 platform_tdc;
        u32 small_ac_platform_tdc;
        u32 apu_tdp;
        u32 dgpu_tdp;
        u32 dgpu_ulv_power;
        u32 tj_max;
};

struct amdgpu_cac_tdp_table {
        u16 tdp;
        u16 configurable_tdp;
        u16 tdc;
        u16 battery_power_limit;
        u16 small_power_limit;
        u16 low_cac_leakage;
        u16 high_cac_leakage;
        u16 maximum_power_delivery_limit;
};
struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};
struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};
struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev,
							struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev,
				       enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
	int (*force_clock_level)(struct amdgpu_device *adev,
				 enum pp_clock_type type, uint32_t mask);
	int (*print_clock_levels)(struct amdgpu_device *adev,
				  enum pp_clock_type type, char *buf);
	int (*get_sclk_od)(struct amdgpu_device *adev);
	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
	int (*get_mclk_od)(struct amdgpu_device *adev);
	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
};
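
/*
 * Illustrative sketch (an assumption, not the driver's dispatch code):
 * not every ASIC backend fills in every hook above, so callers are
 * expected to test a pointer before using it, e.g.
 *
 *	if (adev->pm.funcs->powergate_uvd)
 *		adev->pm.funcs->powergate_uvd(adev, true);
 */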
struct amdgpu_dpm {
	struct amdgpu_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct amdgpu_ps *current_ps;
	/* requested power state */
	struct amdgpu_ps *requested_ps;
	/* boot up power state */
	struct amdgpu_ps *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	bool vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};
struct amdgpu_pm {
	struct mutex mutex;
	u32 current_sclk;
	u32 current_mclk;
	u32 default_sclk;
	u32 default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* fan control parameters */
	bool no_fan;
	u8 fan_pulses_per_revolution;
	u8 fan_min_rpm;
	u8 fan_max_rpm;
	/* dpm */
	bool dpm_enabled;
	bool sysfs_initialized;
	struct amdgpu_dpm dpm;
	const struct firmware *fw; /* SMC firmware */
	uint32_t fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t pcie_gen_mask;
	uint32_t pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
};
void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_DEFAULT_UVD_HANDLES 10
#define AMDGPU_MAX_UVD_HANDLES 40
#define AMDGPU_UVD_STACK_SIZE (200*1024)
#define AMDGPU_UVD_HEAP_SIZE (256*1024)
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	unsigned fw_version;
	void *saved_bo;
	unsigned max_handles;
	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw; /* UVD firmware */
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	bool address_64_bit;
	bool use_ctx_buf;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
};
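
/*
 * Illustrative note (an assumption matching the handles[]/filp[] arrays
 * above): each decode session occupies one slot, claimed by atomically
 * swinging the handle from 0 to the session id, e.g.
 *
 *	if (atomic_cmpxchg(&adev->uvd.handles[i], 0, handle) == 0) {
 *		adev->uvd.filp[i] = filp;	-- slot i now owns the session
 *		break;
 *	}
 */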
/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	struct mutex idle_mutex;
	const struct firmware *fw; /* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
};
/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;
	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
	uint32_t srbm_soft_reset;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};
/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
/*
 * Debugfs
 */
struct amdgpu_debugfs {
	const struct drm_info_list *files;
	unsigned num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles);
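
/*
 * Illustrative sketch (an assumption, not existing driver code): a
 * component describes its entries with a static drm_info_list table and
 * registers them in one call; example_show is a hypothetical seq_file
 * callback.
 *
 *	static const struct drm_info_list example_list[] = {
 *		{ "amdgpu_example", example_show, 0, NULL },
 *	};
 *
 *	r = amdgpu_debugfs_add_files(adev, example_list,
 *				     ARRAY_SIZE(example_list));
 */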
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* query virtual capabilities */
	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
};
/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};
/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/* GPU virtualization */
#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
#define AMDGPU_VIRT_CAPS_IS_VF    (1 << 1)

struct amdgpu_virtualization {
	bool supports_sr_iov;
	bool is_virtual;
	u32 caps;
};
/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool hang;
};
struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp acp;
#endif

	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool need_dma32;
	bool accel_working;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	struct amdgpu_bo *stolen_vga_memory;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t gc_cac_idx_lock;
	amdgpu_rreg_t gc_cac_rreg;
	amdgpu_wreg_t gc_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t vram_usage;
	atomic64_t vram_vis_usage;
	atomic64_t gtt_usage;
	atomic64_t num_bytes_moved;
	atomic64_t num_evictions;
	atomic_t gpu_reset_counter;

	/* display */
	bool enable_virtual_display;
	struct amdgpu_mode_info mode_info;
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	u64 fence_context;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* powerplay */
	struct amd_powerplay powerplay;
	bool pp_enabled;
	bool pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma;

	/* uvd */
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* GDS */
	struct amdgpu_gds gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int num_ip_blocks;
	struct amdgpu_ip_block_status *ip_block_status;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 invisible_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev *kfd;

	struct amdgpu_virtualization virtualization;
};
bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
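
/*
 * Example (illustrative): in WREG32_P() the mask names the bits to
 * PRESERVE from the current register value; everything outside the mask
 * is taken from val. So
 *
 *	WREG32_P(0x1234, 0x5, 0xffffff00);
 *
 * keeps the upper 24 bits of register 0x1234 and writes 0x5 into the
 * low byte (the 0x1234 offset is hypothetical). WREG32_AND() and
 * WREG32_OR() above are the usual read-modify-write shorthands built
 * on it.
 */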
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
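
/*
 * Example (illustrative): with the usual ASIC register header
 * definitions, e.g.
 *
 *	#define GRBM_STATUS__GUI_ACTIVE_MASK   0x80000000
 *	#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
 *
 * a field is read with
 *
 *	busy = REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE);
 *
 * and updated in a cached value before write-back with
 *
 *	tmp = REG_SET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE, 0);
 */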
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
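
/*
 * Example (illustrative): the helpers read little-endian values out of
 * the mapped VBIOS image, so the PCI expansion ROM signature check is
 *
 *	if (RBIOS16(0) != 0xaa55)
 *		return false;
 */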
/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
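
/*
 * Illustrative sketch (an assumption, not a real packet encoding):
 * commands are emitted as a sequence of amdgpu_ring_write() calls inside
 * a reserved window; the 0xffff1000 dword below is a placeholder, not a
 * valid packet for any ASIC.
 */
static inline void amdgpu_ring_example_fill(struct amdgpu_ring *ring,
					    unsigned count)
{
	/* emit "count" placeholder dwords */
	while (count--)
		amdgpu_ring_write(ring, 0xffff1000);
}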
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	/* compare against the loop bound, not the array capacity, so a
	 * ring that is not an SDMA ring correctly yields NULL */
	if (i < adev->sdma.num_instances)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
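
/*
 * Illustrative sketch (an assumption mirroring the burst_nop flag
 * declared above): SDMA code looks up its own instance to honor
 * per-instance firmware features, e.g.
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		...pad with a single burst NOP instead of a NOP per dword...
 */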
/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))

#define amdgpu_dpm_get_temperature(adev) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
	 (adev)->pm.funcs->get_temperature((adev)))
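
/*
 * Illustrative note: this wrapper and the fan/clock wrappers below route
 * to the powerplay backend when pp_enabled is set and fall back to the
 * legacy dpm function table otherwise, so callers never test pp_enabled
 * themselves, e.g.
 *
 *	if (amdgpu_dpm_get_temperature(adev) > max_temp)
 *		...
 *
 * where max_temp is a caller-supplied limit (hypothetical here).
 */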
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
	 (adev)->pm.funcs->set_fan_control_mode((adev), (m)))

#define amdgpu_dpm_get_fan_control_mode(adev) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
	 (adev)->pm.funcs->get_fan_control_mode((adev)))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	 (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	 (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_sclk(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->get_sclk((adev), (l)))

#define amdgpu_dpm_get_mclk(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->get_mclk((adev), (l)))

#define amdgpu_dpm_force_performance_level(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->force_performance_level((adev), (l)))

#define amdgpu_dpm_powergate_uvd(adev, g) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
	 (adev)->pm.funcs->powergate_uvd((adev), (g)))

#define amdgpu_dpm_powergate_vce(adev, g) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
	 (adev)->pm.funcs->powergate_vce((adev), (g)))

#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
	 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))

#define amdgpu_dpm_get_current_power_state(adev) \
	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_performance_level(adev) \
	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_pp_num_states(adev, data) \
	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)

#define amdgpu_dpm_get_pp_table(adev, table) \
	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)

#define amdgpu_dpm_set_pp_table(adev, buf, size) \
	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)

#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)

#define amdgpu_dpm_force_clock_level(adev, type, level) \
	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)

#define amdgpu_dpm_get_sclk_od(adev) \
	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)

#define amdgpu_dpm_set_sclk_od(adev, value) \
	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)

#define amdgpu_dpm_get_mclk_od(adev) \
	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))

#define amdgpu_dpm_set_mclk_od(adev, value) \
	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))

#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev);
int amdgpu_ttm_global_init(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;
	int n_32khz;
	int cts_32khz;
	int n_44_1khz;
	int cts_44_1khz;
	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"
#endif
  2205. #endif