/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;

#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS 4

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX (1 << 0)
#define AMDGPU_CG_BLOCK_MC (1 << 1)
#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
#define AMDGPU_CG_BLOCK_UVD (1 << 3)
#define AMDGPU_CG_BLOCK_VCE (1 << 4)
#define AMDGPU_CG_BLOCK_HDP (1 << 5)
#define AMDGPU_CG_BLOCK_BIF (1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
#define AMDGPU_PG_SUPPORT_CP (1 << 5)
#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,
	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
		enum amd_ip_block_type block_type,
		enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
		enum amd_ip_block_type block_type,
		enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
		enum amd_ip_block_type type,
		u32 major, u32 minor);

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
		struct amdgpu_device *adev,
		enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
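
/*
 * Copies or fills larger than {copy,fill}_max_bytes have to be split into
 * multiple emit calls, each of which needs {copy,fill}_num_dw ring dwords
 * reserved; e.g. if copy_max_bytes were 2 MiB, migrating an 8 MiB buffer
 * would take at least four emit_copy_buffer() calls.
 */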

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	bool (*is_lockup)(struct amdgpu_ring *ring);
};
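
/*
 * parse_cs is optional: it is only needed where user space command streams
 * have to be validated and patched before execution (typically the UVD/VCE
 * rings); rings that execute user IBs directly can leave it NULL.
 */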

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);

/*
 * Clocks
 */
#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};
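
/*
 * With the 10 kHz units above, a default_sclk value of 30000 corresponds to
 * 300 MHz, for example.
 */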

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	struct amdgpu_ring *ring;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t sync_seq[AMDGPU_MAX_RINGS];
	atomic64_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct delayed_work lockup_work;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
#define AMDGPU_FENCE_OWNER_MOVE ((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
	uint64_t seq;

	/* filp or special value for fence creator */
	void *owner;

	wait_queue_t fence_wake;
};
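
/*
 * An amdgpu_fence embeds a struct fence (linux/fence.h), so it can be handed
 * to the common fence machinery; seq is the per-ring sequence number that
 * emit_fence writes back and that gets compared against fence_drv.last_seq.
 */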

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo *bo;
	/* write-back address offset to bo start */
	uint32_t offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);

static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
						      struct amdgpu_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
					   struct amdgpu_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}
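
/*
 * For example, with a->seq == 10 and b->seq == 12 on the same ring,
 * amdgpu_fence_later(a, b) returns b and amdgpu_fence_is_earlier(a, b) is
 * true.
 */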

int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *vram;
	struct dentry *gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct amdgpu_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	unsigned prefered_domains;
	unsigned allowed_domains;
	uint32_t priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	uint64_t addr;
	struct amdgpu_fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex */
	struct list_head mappings;
	struct list_head vm_status;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 initial_domain;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses this bo is associated with */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;
	struct ttm_bo_kmap_obj dma_buf_vmap;
	pid_t pid;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							 struct dma_buf_attachment *attach,
							 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore code, which have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; that object's range then becomes the one we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_MAX_RINGS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct amdgpu_fence *fence;
};
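
/*
 * Roughly: every amdgpu_sa_bo sits on the manager's olist in offset order,
 * and on the per-ring flist of the fence still protecting it; once that
 * fence signals, the [soffset, eoffset) range can be handed out again.
 */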

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex mutex;
	struct list_head objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct amdgpu_semaphore {
	struct amdgpu_sa_bo *sa_bo;
	signed waiters;
	uint64_t gpu_addr;
};

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore);
bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore);
bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct amdgpu_fence *fence);

/*
 * Synchronization
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
	struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS];
	struct amdgpu_fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct amdgpu_fence *fence);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
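
/*
 * GPU pages are always 4 KiB here, e.g. AMDGPU_GPU_PAGE_ALIGN(5000) rounds up
 * to 8192, i.e. two GPU pages.
 */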

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */
struct amdgpu_flip_work {
	struct work_struct flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_rbo;
	struct fence *fence;
};

/*
 * CP & rings.
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	struct amdgpu_ring *ring;
	struct amdgpu_fence *fence;
	struct amdgpu_user_fence *user;
	struct amdgpu_vm *vm;
	struct amdgpu_ctx *ctx;
	struct amdgpu_sync sync;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t flags;
	/* resulting sequence number */
	uint64_t sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;

	struct mutex *ring_lock;
	struct amdgpu_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr_offs;
	u64 next_rptr_gpu_addr;
	volatile u32 *next_rptr_cpu_addr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	atomic_t last_rptr;
	atomic64_t last_activity;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u64 last_semaphore_signal_addr;
	u64 last_semaphore_wait_addr;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	u32 doorbell_index;
	bool use_doorbell;
	unsigned wptr_offs;
	unsigned next_rptr_offs;
	unsigned fence_offs;
	struct amdgpu_ctx *current_ctx;
	enum amdgpu_ring_type type;
	char name[16];
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)
#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)
/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB (0 << 7)
#define AMDGPU_PTE_FRAG_64KB (4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
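
/*
 * The fragment field encodes log2(fragment size / 4 KiB): the value 4 used by
 * AMDGPU_PTE_FRAG_64KB means 2^4 = 16 contiguous 4 KiB pages, i.e. a 64 KiB
 * fragment.
 */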

struct amdgpu_vm_pt {
	struct amdgpu_bo *bo;
	uint64_t addr;
};

struct amdgpu_vm_id {
	unsigned id;
	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct amdgpu_fence *flushed_updates;
	/* last use of vmid */
	struct amdgpu_fence *last_id_use;
};

struct amdgpu_vm {
	struct mutex mutex;

	struct rb_root va;

	/* protecting invalidated and freed */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
};
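
/*
 * Each VM keeps one amdgpu_vm_id slot per ring: the hardware VMID it last ran
 * with on that ring plus the fences that tell us when the ID needs to be
 * flushed or can be reused.
 */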

struct amdgpu_vm_manager {
	struct amdgpu_fence *active[AMDGPU_NUM_VM];
	uint32_t max_pfn;
	/* number of VMIDs */
	unsigned nvm;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* for hw to save the PD addr on suspend/resume */
	uint32_t saved_table_addr[AMDGPU_NUM_VM];
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_funcs_ring;
};

/*
 * context related structures
 */
#define AMDGPU_CTX_MAX_CS_PENDING 16

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
};

struct amdgpu_ctx {
	struct kref refcount;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id);
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		    uint32_t id);
void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

/*
 * file private structure
 */
struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	bool has_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gca_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* sync signal for const engine */
	unsigned ce_sync_offs;
	/* ce ram size */
	unsigned ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
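
/*
 * Typical usage (sketch): amdgpu_ring_lock(ring, ndw) reserves ndw dwords and
 * takes the ring lock, the caller emits its packets (e.g. with
 * amdgpu_ring_write()), and amdgpu_ring_unlock_commit() makes them visible to
 * the hardware; amdgpu_ring_unlock_undo() drops the reservation instead.
 */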

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
	void __user *user_ptr;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;
	struct amdgpu_bo_list *bo_list;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* relocations */
	struct amdgpu_bo_list_entry *vm_bos;
	struct list_head validated;

	struct amdgpu_ib *ibs;
	uint32_t num_ibs;

	struct ww_acquire_ctx ticket;

	/* user fence */
	struct amdgpu_user_fence uf;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
}
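
/*
 * i.e. this returns dword 'idx' of the parser's ib_idx-th IB; callers (the
 * per-ring parse_cs implementations) are expected to stay within length_dw.
 */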

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};
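
/*
 * 'used' is a bitmap with one bit per WB slot; with AMDGPU_MAX_WB == 1024
 * that is 1024 / 64 = 16 unsigned longs on a 64-bit kernel.
 */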
  1093. int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
  1094. void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
  1095. /**
  1096. * struct amdgpu_pm - power management datas
  1097. * It keeps track of various data needed to take powermanagement decision.
  1098. */
  1099. enum amdgpu_pm_state_type {
  1100. /* not used for dpm */
  1101. POWER_STATE_TYPE_DEFAULT,
  1102. POWER_STATE_TYPE_POWERSAVE,
  1103. /* user selectable states */
  1104. POWER_STATE_TYPE_BATTERY,
  1105. POWER_STATE_TYPE_BALANCED,
  1106. POWER_STATE_TYPE_PERFORMANCE,
  1107. /* internal states */
  1108. POWER_STATE_TYPE_INTERNAL_UVD,
  1109. POWER_STATE_TYPE_INTERNAL_UVD_SD,
  1110. POWER_STATE_TYPE_INTERNAL_UVD_HD,
  1111. POWER_STATE_TYPE_INTERNAL_UVD_HD2,
  1112. POWER_STATE_TYPE_INTERNAL_UVD_MVC,
  1113. POWER_STATE_TYPE_INTERNAL_BOOT,
  1114. POWER_STATE_TYPE_INTERNAL_THERMAL,
  1115. POWER_STATE_TYPE_INTERNAL_ACPI,
  1116. POWER_STATE_TYPE_INTERNAL_ULV,
  1117. POWER_STATE_TYPE_INTERNAL_3DPERF,
  1118. };
  1119. enum amdgpu_int_thermal_type {
  1120. THERMAL_TYPE_NONE,
  1121. THERMAL_TYPE_EXTERNAL,
  1122. THERMAL_TYPE_EXTERNAL_GPIO,
  1123. THERMAL_TYPE_RV6XX,
  1124. THERMAL_TYPE_RV770,
  1125. THERMAL_TYPE_ADT7473_WITH_INTERNAL,
  1126. THERMAL_TYPE_EVERGREEN,
  1127. THERMAL_TYPE_SUMO,
  1128. THERMAL_TYPE_NI,
  1129. THERMAL_TYPE_SI,
  1130. THERMAL_TYPE_EMC2103_WITH_INTERNAL,
  1131. THERMAL_TYPE_CI,
  1132. THERMAL_TYPE_KV,
  1133. };
  1134. enum amdgpu_dpm_auto_throttle_src {
  1135. AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
  1136. AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
  1137. };
  1138. enum amdgpu_dpm_event_src {
  1139. AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
  1140. AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
  1141. AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
  1142. AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
  1143. AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
  1144. };
  1145. #define AMDGPU_MAX_VCE_LEVELS 6
  1146. enum amdgpu_vce_level {
  1147. AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
  1148. AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
  1149. AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
  1150. AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
  1151. AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
  1152. AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
  1153. };
  1154. struct amdgpu_ps {
  1155. u32 caps; /* vbios flags */
  1156. u32 class; /* vbios flags */
  1157. u32 class2; /* vbios flags */
  1158. /* UVD clocks */
  1159. u32 vclk;
  1160. u32 dclk;
  1161. /* VCE clocks */
  1162. u32 evclk;
  1163. u32 ecclk;
  1164. bool vce_active;
  1165. enum amdgpu_vce_level vce_level;
  1166. /* asic priv */
  1167. void *ps_priv;
  1168. };
  1169. struct amdgpu_dpm_thermal {
  1170. /* thermal interrupt work */
  1171. struct work_struct work;
  1172. /* low temperature threshold */
  1173. int min_temp;
  1174. /* high temperature threshold */
  1175. int max_temp;
  1176. /* was last interrupt low to high or high to low */
  1177. bool high_to_low;
  1178. /* interrupt source */
  1179. struct amdgpu_irq_src irq;
  1180. };
  1181. enum amdgpu_clk_action
  1182. {
  1183. AMDGPU_SCLK_UP = 1,
  1184. AMDGPU_SCLK_DOWN
  1185. };
  1186. struct amdgpu_blacklist_clocks
  1187. {
  1188. u32 sclk;
  1189. u32 mclk;
  1190. enum amdgpu_clk_action action;
  1191. };
  1192. struct amdgpu_clock_and_voltage_limits {
  1193. u32 sclk;
  1194. u32 mclk;
  1195. u16 vddc;
  1196. u16 vddci;
  1197. };
  1198. struct amdgpu_clock_array {
  1199. u32 count;
  1200. u32 *values;
  1201. };
  1202. struct amdgpu_clock_voltage_dependency_entry {
  1203. u32 clk;
  1204. u16 v;
  1205. };
  1206. struct amdgpu_clock_voltage_dependency_table {
  1207. u32 count;
  1208. struct amdgpu_clock_voltage_dependency_entry *entries;
  1209. };
  1210. union amdgpu_cac_leakage_entry {
  1211. struct {
  1212. u16 vddc;
  1213. u32 leakage;
  1214. };
  1215. struct {
  1216. u16 vddc1;
  1217. u16 vddc2;
  1218. u16 vddc3;
  1219. };
  1220. };
  1221. struct amdgpu_cac_leakage_table {
  1222. u32 count;
  1223. union amdgpu_cac_leakage_entry *entries;
  1224. };
struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};
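
/*
 * Illustrative sketch, not part of the original header: dpm backends install
 * an amdgpu_dpm_funcs table in adev->pm.funcs and core code dispatches
 * through it. The wrapper name amdgpu_example_dpm_read_temp() is hypothetical.
 */
static inline int amdgpu_example_dpm_read_temp(struct amdgpu_device *adev)
{
	/* Guard against backends that do not implement the hook. */
	if (!adev->pm.funcs || !adev->pm.funcs->get_temperature)
		return -EINVAL;
	/* Units are backend-defined (typically millidegrees Celsius). */
	return adev->pm.funcs->get_temperature(adev);
}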
struct amdgpu_dpm {
	struct amdgpu_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct amdgpu_ps *current_ps;
	/* requested power state */
	struct amdgpu_ps *requested_ps;
	/* boot up power state */
	struct amdgpu_ps *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amdgpu_pm_state_type state;
	enum amdgpu_pm_state_type user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	bool vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex mutex;
	u32 current_sclk;
	u32 current_mclk;
	u32 default_sclk;
	u32 default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* fan control parameters */
	bool no_fan;
	u8 fan_pulses_per_revolution;
	u8 fan_min_rpm;
	u8 fan_max_rpm;
	/* dpm */
	bool dpm_enabled;
	struct amdgpu_dpm dpm;
	const struct firmware *fw;	/* SMC firmware */
	uint32_t fw_version;
	const struct amdgpu_dpm_funcs *funcs;
};

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES		10
#define AMDGPU_UVD_STACK_SIZE		(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE		(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET	256

struct amdgpu_uvd {
	struct amdgpu_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	void *saved_bo;
	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw;	/* UVD firmware */
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	bool address_64_bit;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES		16
#define AMDGPU_VCE_FIRMWARE_OFFSET	256
#define AMDGPU_VCE_HARVEST_VCE0		(1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1		(1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw;	/* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
};

/*
 * SDMA
 */
struct amdgpu_sdma {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;
	struct amdgpu_ring ring;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
/* Stubs must be static inline so the header does not emit unused-function
 * warnings in every translation unit when CONFIG_MMU_NOTIFIER is disabled. */
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
/*
 * Debugfs
 */
struct amdgpu_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
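
/*
 * Illustrative sketch, not part of the original header: registering a single
 * entry through amdgpu_debugfs_add_files(). The show callback and the
 * drm_info_list entry below are hypothetical examples; the drm_info_node
 * plumbing follows the usual DRM debugfs pattern.
 */
static int amdgpu_example_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu_reset_counter: %d\n",
		   atomic_read(&adev->gpu_reset_counter));
	return 0;
}

static struct drm_info_list amdgpu_example_debugfs_list[] = {
	{ "amdgpu_example_info", amdgpu_example_debugfs_show, 0, NULL },
};

/* Typically called from an IP block's sw_init:
 *	amdgpu_debugfs_add_files(adev, amdgpu_example_debugfs_list, 1);
 */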
/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

struct amdgpu_cu_info {
	uint32_t number;	/* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
};

struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	struct rw_semaphore exclusive_lock;

	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	bool accel_working;
	bool needs_reset;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs;
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct amdgpu_bo *stollen_vga_memory;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_gem gem;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t vram_usage;
	atomic64_t vram_vis_usage;
	atomic64_t gtt_usage;
	atomic64_t num_bytes_moved;
	atomic_t gpu_reset_counter;

	/* display */
	struct amdgpu_mode_info mode_info;
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	wait_queue_head_t fence_queue;
	unsigned fence_context;
	struct mutex ring_lock;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma[2];
	struct amdgpu_irq_src sdma_trap_irq;
	struct amdgpu_irq_src sdma_illegal_inst_irq;

	/* uvd */
	bool has_uvd;
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* GDS */
	struct amdgpu_gds gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int num_ip_blocks;
	struct amdgpu_ip_block_status *ip_block_status;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev *kfd;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Cast helper
 */
extern const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
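
/*
 * Illustrative sketch, not part of the original header: to_amdgpu_fence()
 * returns NULL for fences that were not created by this driver, so callers
 * check the result before touching amdgpu-specific state. The helper name
 * below is hypothetical.
 */
static inline bool amdgpu_example_fence_is_amdgpu(struct fence *f)
{
	return to_amdgpu_fence(f) != NULL;
}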
/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
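
/*
 * Illustrative sketch, not part of the original header: WREG32_P() does a
 * read-modify-write that keeps the bits selected by @mask and takes the
 * remaining bits from @val. The register offset and bit positions below are
 * hypothetical; the helper name amdgpu_example_rmw() is an assumption.
 */
static inline void amdgpu_example_rmw(struct amdgpu_device *adev)
{
	/* Clear bit 0, preserve every other bit of the (hypothetical) register. */
	WREG32_P(0x1234, 0, ~1u);
	/* Equivalent helpers for pure OR / AND updates: */
	WREG32_OR(0x1234, 1u << 3);	/* set bit 3   */
	WREG32_AND(0x1234, ~(1u << 4));	/* clear bit 4 */
}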
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
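
/*
 * Illustrative sketch, not part of the original header: REG_SET_FIELD() and
 * REG_GET_FIELD() rely on the <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK
 * naming convention of the generated register headers. The EXAMPLE_CNTL
 * register, its ENABLE field and the helper below are all hypothetical.
 */
#define EXAMPLE_CNTL__ENABLE__SHIFT	0x0
#define EXAMPLE_CNTL__ENABLE_MASK	0x00000001

static inline u32 amdgpu_example_set_enable(u32 reg_val, bool on)
{
	/* Replace only the ENABLE field, leaving the other bits untouched. */
	return REG_SET_FIELD(reg_val, EXAMPLE_CNTL, ENABLE, on ? 1 : 0);
}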
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
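
/*
 * Illustrative sketch, not part of the original header: the RBIOS* helpers
 * read little-endian values out of the copied VBIOS image (adev->bios). The
 * helper name is hypothetical and the 0x48 offset (conventionally the ATOM
 * ROM header pointer) is given purely as an example; it assumes adev->bios
 * has already been fetched and validated.
 */
static inline u16 amdgpu_example_bios_rom_header(struct amdgpu_device *adev)
{
	return RBIOS16(0x48);
}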
/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
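
/*
 * Illustrative sketch, not part of the original header: ring backends emit
 * command packets dword by dword with amdgpu_ring_write(). The helper name
 * and the NOP encoding below are hypothetical; real packet encodings come
 * from the per-ASIC packet headers.
 */
static inline void amdgpu_example_emit_nops(struct amdgpu_ring *ring,
					    unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, 0x80000000);	/* hypothetical NOP dword */
}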
/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);
bool amdgpu_device_is_px(struct drm_device *dev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * vm
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					       struct amdgpu_vm *vm,
					       struct list_head *head);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
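
/*
 * Illustrative sketch, not part of the original header: the usual flow for
 * binding a BO into a VM is amdgpu_vm_bo_add() followed by amdgpu_vm_bo_map().
 * The helper name, GPU VA, size and the AMDGPU_PTE_* flag names (assumed to be
 * defined elsewhere in this header) are all hypothetical example values.
 */
static inline int amdgpu_example_vm_bind(struct amdgpu_device *adev,
					 struct amdgpu_vm *vm,
					 struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;

	/* Map 1 MiB of the BO at GPU VA 0x100000, readable and writeable. */
	return amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
				AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
}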
/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;
	int n_32khz;
	int cts_32khz;
	int n_44_1khz;
	int cts_44_1khz;
	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"
#endif