
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX     (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA     (1 << 2)
#define AMDGPU_RESET_CP      (1 << 3)
#define AMDGPU_RESET_GRBM    (1 << 4)
#define AMDGPU_RESET_DMA1    (1 << 5)
#define AMDGPU_RESET_RLC     (1 << 6)
#define AMDGPU_RESET_SEM     (1 << 7)
#define AMDGPU_RESET_IH      (1 << 8)
#define AMDGPU_RESET_VMC     (1 << 9)
#define AMDGPU_RESET_MC      (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD     (1 << 12)
#define AMDGPU_RESET_VCE     (1 << 13)
#define AMDGPU_RESET_VCE1    (1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE        0x00000000L
#define AMDGPU_GFX_SAFE_MODE          0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE   0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE   0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,
	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;
	/* number of dw to reserve per operation */
	unsigned copy_num_dw;
	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;
	/* number of dw to reserve per operation */
	unsigned fill_num_dw;
	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
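/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * shows how copy_max_bytes and copy_num_dw are meant to be combined by
 * callers such as the buffer migration code. A copy larger than
 * copy_max_bytes has to be split into several emit_copy_buffer() calls,
 * and each call costs copy_num_dw dwords of IB space.
 */
static inline unsigned
amdgpu_buffer_copy_dw_needed(const struct amdgpu_buffer_funcs *funcs,
			     uint64_t byte_count)
{
	/* number of emit_copy_buffer() operations needed for this copy */
	uint64_t num_ops = DIV_ROUND_UP(byte_count, funcs->copy_max_bytes);

	/* each operation reserves copy_num_dw dwords in the IB */
	return num_ops * funcs->copy_num_dw;
}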
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};
/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);

/*
 * Clocks
 */
#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};
/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t sync_seq;
	atomic_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct timer_list fallback_timer;
	unsigned num_fences_mask;
	spinlock_t lock;
	struct fence **fences;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT   (1 << 1)

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo *bo;
	/* write-back address offset to bo start */
	uint32_t offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
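/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * amdgpu_fence_emit() hands back a struct fence that plugs into the core
 * fence API from <linux/fence.h>, so waiting for the GPU to pass a point
 * in the ring is a plain fence_wait().
 */
static inline int amdgpu_fence_emit_and_wait(struct amdgpu_ring *ring)
{
	struct fence *fence;
	int r;

	/* allocate a fence and emit it after the current ring contents */
	r = amdgpu_fence_emit(ring, &fence);
	if (r)
		return r;

	/* block (interruptibly) until the GPU signals the fence */
	r = fence_wait(fence, true);
	fence_put(fence);
	return r;
}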
/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *vram;
	struct dentry *gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
	/* Scheduler entity for buffer moves */
	struct amd_sched_entity entity;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	struct fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head vm_status;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses this bo is associated with */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
					struct dma_buf_attachment *attach,
					struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, such as
 * the indirect buffer or semaphore code, which bring their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check whether there is room
 * at the end, i.e. whether
 * total_size - (last_object_offset + last_object_size) >= alloc_size.
 * If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we wait on each sub
 * object in turn until object_offset + object_size >= alloc_size;
 * that object's space then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be any holes (all objects use
 * the same alignment).
 */
#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct fence *fence;
};
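/*
 * Illustrative sketch only (hypothetical helper): the "is there room at
 * the end?" test from the comment above. The last sub-allocation ends at
 * the eoffset of the final olist entry, so a new object fits at the end
 * when size - last_end >= alloc_size.
 */
static inline bool
amdgpu_sa_fits_at_end(struct amdgpu_sa_manager *sa_manager,
		      unsigned alloc_size)
{
	unsigned last_end = 0;

	if (!list_empty(&sa_manager->olist)) {
		struct amdgpu_sa_bo *last;

		/* entries are kept in offset order, so the tail is highest */
		last = list_last_entry(&sa_manager->olist,
				       struct amdgpu_sa_bo, olist);
		last_end = last->eoffset;
	}
	return sa_manager->size - last_end >= alloc_size;
}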
/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Synchronization
 */
struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);
	struct fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
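/*
 * Illustrative sketch only (hypothetical usage, not part of the driver):
 * collect every fence attached to a reservation object and wait for all
 * of them, roughly the pattern behind amdgpu_sync_wait().
 * amdgpu_sync_get_fence() hands back the gathered fences one at a time
 * until the container is empty.
 */
static inline int amdgpu_sync_wait_resv(struct amdgpu_device *adev,
					struct reservation_object *resv,
					void *owner)
{
	struct amdgpu_sync sync;
	struct fence *f;
	int r;

	amdgpu_sync_create(&sync);
	r = amdgpu_sync_resv(adev, &sync, resv, owner);
	while (!r && (f = amdgpu_sync_get_fence(&sync))) {
		r = fence_wait(f, false);
		fence_put(f);
	}
	amdgpu_sync_free(&sync);
	return r;
}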
/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
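/*
 * Worked example: AMDGPU_GPU_PAGE_ALIGN(4097) = (4097 + 4095) & ~4095
 * = 8192, while an already aligned value passes through unchanged:
 * AMDGPU_GPU_PAGE_ALIGN(8192) = 8192.
 */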
struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */
struct amdgpu_flip_work {
	struct work_struct flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_rbo;
	struct fence *excl;
	unsigned shared_count;
	struct fence **shared;
	struct fence_cb cb;
};

/*
 * CP & rings.
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	struct amdgpu_user_fence *user;
	struct amdgpu_vm *vm;
	unsigned vm_id;
	uint64_t vm_pd_addr;
	struct amdgpu_ctx *ctx;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t flags;
	/* resulting sequence number */
	uint64_t sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;
	struct amd_gpu_scheduler sched;
	spinlock_t fence_lock;

	struct amdgpu_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr_offs;
	u64 next_rptr_gpu_addr;
	volatile u32 *next_rptr_cpu_addr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned max_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	u32 doorbell_index;
	bool use_doorbell;
	unsigned wptr_offs;
	unsigned next_rptr_offs;
	unsigned fence_offs;
	struct amdgpu_ctx *current_ctx;
	enum amdgpu_ring_type type;
	char name[16];
	unsigned cond_exe_offs;
	u64 cond_exe_gpu_addr;
	volatile u32 *cond_exe_cpu_addr;
};
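/*
 * Illustrative sketch only (hypothetical helper, named so it does not
 * clash with the driver's real ring helpers): how wptr, ptr_mask and
 * count_dw cooperate when a dword is written to the ring.
 * amdgpu_ring_alloc() reserves count_dw dwords up front; every write
 * consumes one, and the write pointer wraps via the power-of-two mask.
 */
static inline void amdgpu_ring_write_dw(struct amdgpu_ring *ring, uint32_t v)
{
	/* store the dword at the current write position */
	ring->ring[ring->wptr++] = v;
	/* the ring size is a power of two, so wrap with a mask */
	ring->wptr &= ring->ptr_mask;
	/* one dword less of the reserved space remains */
	ring->count_dw--;
}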
/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID   (1 << 0)
#define AMDGPU_PTE_SYSTEM  (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)

#define AMDGPU_PTE_READABLE  (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB  (0 << 7)
#define AMDGPU_PTE_FRAG_64KB (4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER  0
#define AMDGPU_VM_FAULT_STOP_FIRST  1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2

struct amdgpu_vm_pt {
	struct amdgpu_bo_list_entry entry;
	uint64_t addr;
};

struct amdgpu_vm_id {
	struct amdgpu_vm_manager_id *mgr_id;
	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct fence *flushed_updates;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root va;

	/* protecting invalidated */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;
	struct fence *page_directory_fence;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity entity;
};

struct amdgpu_vm_manager_id {
	struct list_head list;
	struct fence *active;
	atomic_long_t owner;

	uint32_t gds_base;
	uint32_t gds_size;
	uint32_t gws_base;
	uint32_t gws_size;
	uint32_t oa_base;
	uint32_t oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex lock;
	unsigned num_ids;
	struct list_head ids_lru;
	struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];

	uint32_t max_pfn;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_rings;
	atomic_t vm_pte_next_ring;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence,
		      unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     unsigned vm_id, uint64_t pd_addr,
		     uint32_t gds_base, uint32_t gds_size,
		     uint32_t gws_base, uint32_t gws_size,
		     uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

/*
 * context related structures
 */
struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct fence **fences;
	struct amd_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
 * file private structure
 */
struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gca_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;
};

struct amdgpu_job {
	struct amd_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_ib *ibs;
	struct fence *fence; /* the hw fence */
	uint32_t num_ibs;
	void *owner;
	struct amdgpu_user_fence uf;
};
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
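/*
 * Illustrative sketch only (hypothetical helper): the used[] bitmap above
 * turns writeback slot allocation into a bitmap search, roughly the scheme
 * behind amdgpu_wb_get()/amdgpu_wb_free().
 */
static inline int amdgpu_wb_get_slot(struct amdgpu_wb *wb, u32 *slot)
{
	unsigned long offset = find_first_zero_bit(wb->used, wb->num_wb);

	if (offset >= wb->num_wb)
		return -EINVAL; /* all writeback slots are in use */

	__set_bit(offset, wb->used);
	*slot = offset;
	return 0;
}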
  1096. enum amdgpu_int_thermal_type {
  1097. THERMAL_TYPE_NONE,
  1098. THERMAL_TYPE_EXTERNAL,
  1099. THERMAL_TYPE_EXTERNAL_GPIO,
  1100. THERMAL_TYPE_RV6XX,
  1101. THERMAL_TYPE_RV770,
  1102. THERMAL_TYPE_ADT7473_WITH_INTERNAL,
  1103. THERMAL_TYPE_EVERGREEN,
  1104. THERMAL_TYPE_SUMO,
  1105. THERMAL_TYPE_NI,
  1106. THERMAL_TYPE_SI,
  1107. THERMAL_TYPE_EMC2103_WITH_INTERNAL,
  1108. THERMAL_TYPE_CI,
  1109. THERMAL_TYPE_KV,
  1110. };
  1111. enum amdgpu_dpm_auto_throttle_src {
  1112. AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
  1113. AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
  1114. };
  1115. enum amdgpu_dpm_event_src {
  1116. AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
  1117. AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
  1118. AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
  1119. AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
  1120. AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
  1121. };
  1122. #define AMDGPU_MAX_VCE_LEVELS 6
  1123. enum amdgpu_vce_level {
  1124. AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
  1125. AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
  1126. AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
  1127. AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
  1128. AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
  1129. AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
  1130. };
  1131. struct amdgpu_ps {
  1132. u32 caps; /* vbios flags */
  1133. u32 class; /* vbios flags */
  1134. u32 class2; /* vbios flags */
  1135. /* UVD clocks */
  1136. u32 vclk;
  1137. u32 dclk;
  1138. /* VCE clocks */
  1139. u32 evclk;
  1140. u32 ecclk;
  1141. bool vce_active;
  1142. enum amdgpu_vce_level vce_level;
  1143. /* asic priv */
  1144. void *ps_priv;
  1145. };
  1146. struct amdgpu_dpm_thermal {
  1147. /* thermal interrupt work */
  1148. struct work_struct work;
  1149. /* low temperature threshold */
  1150. int min_temp;
  1151. /* high temperature threshold */
  1152. int max_temp;
  1153. /* was last interrupt low to high or high to low */
  1154. bool high_to_low;
  1155. /* interrupt source */
  1156. struct amdgpu_irq_src irq;
  1157. };
  1158. enum amdgpu_clk_action
  1159. {
  1160. AMDGPU_SCLK_UP = 1,
  1161. AMDGPU_SCLK_DOWN
  1162. };
  1163. struct amdgpu_blacklist_clocks
  1164. {
  1165. u32 sclk;
  1166. u32 mclk;
  1167. enum amdgpu_clk_action action;
  1168. };
  1169. struct amdgpu_clock_and_voltage_limits {
  1170. u32 sclk;
  1171. u32 mclk;
  1172. u16 vddc;
  1173. u16 vddci;
  1174. };
  1175. struct amdgpu_clock_array {
  1176. u32 count;
  1177. u32 *values;
  1178. };
  1179. struct amdgpu_clock_voltage_dependency_entry {
  1180. u32 clk;
  1181. u16 v;
  1182. };
  1183. struct amdgpu_clock_voltage_dependency_table {
  1184. u32 count;
  1185. struct amdgpu_clock_voltage_dependency_entry *entries;
  1186. };
  1187. union amdgpu_cac_leakage_entry {
  1188. struct {
  1189. u16 vddc;
  1190. u32 leakage;
  1191. };
  1192. struct {
  1193. u16 vddc1;
  1194. u16 vddc2;
  1195. u16 vddc3;
  1196. };
  1197. };
  1198. struct amdgpu_cac_leakage_table {
  1199. u32 count;
  1200. union amdgpu_cac_leakage_entry *entries;
  1201. };
  1202. struct amdgpu_phase_shedding_limits_entry {
  1203. u16 voltage;
  1204. u32 sclk;
  1205. u32 mclk;
  1206. };
  1207. struct amdgpu_phase_shedding_limits_table {
  1208. u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
	AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};
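
/*
 * Illustrative sketch (not part of the original header): a legacy,
 * non-powerplay DPM backend fills in amdgpu_dpm_funcs with its per-ASIC
 * callbacks and publishes the table through adev->pm.funcs, which the
 * amdgpu_dpm_* macros further below dereference. The "example_*" symbols
 * are hypothetical placeholders:
 *
 *	static const struct amdgpu_dpm_funcs example_dpm_funcs = {
 *		.get_temperature = example_get_temperature,
 *		.set_power_state = example_set_power_state,
 *		.get_sclk = example_get_sclk,
 *		.get_mclk = example_get_mclk,
 *	};
 *
 *	adev->pm.funcs = &example_dpm_funcs;
 */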
struct amdgpu_dpm {
	struct amdgpu_ps *ps;
	/* number of valid power states */
	int num_ps;
	/* current power state that is active */
	struct amdgpu_ps *current_ps;
	/* requested power state */
	struct amdgpu_ps *requested_ps;
	/* boot up power state */
	struct amdgpu_ps *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amd_pm_state_type state;
	enum amd_pm_state_type user_state;
	u32 platform_caps;
	u32 voltage_response_time;
	u32 backbias_response_time;
	void *priv;
	u32 new_active_crtcs;
	int new_active_crtc_count;
	u32 current_active_crtcs;
	int current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool thermal_active;
	bool uvd_active;
	bool vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex mutex;
	u32 current_sclk;
	u32 current_mclk;
	u32 default_sclk;
	u32 default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
	/* fan control parameters */
	bool no_fan;
	u8 fan_pulses_per_revolution;
	u8 fan_min_rpm;
	u8 fan_max_rpm;
	/* dpm */
	bool dpm_enabled;
	bool sysfs_initialized;
	struct amdgpu_dpm dpm;
	const struct firmware *fw;	/* SMC firmware */
	uint32_t fw_version;
	const struct amdgpu_dpm_funcs *funcs;
	uint32_t pcie_gen_mask;
	uint32_t pcie_mlw_mask;
	struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */
};

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES		10
#define AMDGPU_UVD_STACK_SIZE		(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE		(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET	256

struct amdgpu_uvd {
	struct amdgpu_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	void *saved_bo;
	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw;	/* UVD firmware */
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	bool address_64_bit;
	struct amd_sched_entity entity;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES		16
#define AMDGPU_VCE_FIRMWARE_OFFSET	256
#define AMDGPU_VCE_HARVEST_VCE0		(1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1		(1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw;	/* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
	struct amd_sched_entity entity;
};

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;
	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
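
/*
 * Usage sketch (illustrative): userptr buffer objects register an MMU
 * notifier so that CPU page-table changes invalidate the GPU mapping.
 * With CONFIG_MMU_NOTIFIER disabled, the stub above returns -ENODEV and
 * callers must treat userptr as unsupported:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 */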
/*
 * Debugfs
 */
struct amdgpu_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif
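
/*
 * Usage sketch (illustrative): a component exposes debugfs entries by
 * handing a static drm_info_list array to amdgpu_debugfs_add_files().
 * The "example_*" names are hypothetical placeholders:
 *
 *	static struct drm_info_list example_debugfs_list[] = {
 *		{ "amdgpu_example_info", example_debugfs_show, 0, NULL },
 *	};
 *
 *	amdgpu_debugfs_add_files(adev, example_debugfs_list,
 *				 ARRAY_SIZE(example_debugfs_list));
 */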
/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

struct amdgpu_cu_info {
	uint32_t number;	/* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);
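
/*
 * Usage sketch (illustrative): powerplay talks to the device through an
 * opaque CGS handle that is created once during init and torn down with
 * the device:
 *
 *	void *cgs_device = amdgpu_cgs_create_device(adev);
 *	...
 *	amdgpu_cgs_destroy_device(cgs_device);
 */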
/* GPU virtualization */
struct amdgpu_virtualization {
	bool supports_sr_iov;
};

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
};

struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp acp;
#endif

	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool need_dma32;
	bool accel_working;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs;
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	struct amdgpu_bo *stollen_vga_memory;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t vram_usage;
	atomic64_t vram_vis_usage;
	atomic64_t gtt_usage;
	atomic64_t num_bytes_moved;
	atomic_t gpu_reset_counter;

	/* display */
	struct amdgpu_mode_info mode_info;
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	unsigned fence_context;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* powerplay */
	struct amd_powerplay powerplay;
	bool pp_enabled;
	bool pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma;

	/* uvd */
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* GDS */
	struct amdgpu_gds gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int num_ip_blocks;
	struct amdgpu_ip_block_status *ip_block_status;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev *kfd;

	struct amdgpu_virtualization virtualization;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
/* unlike REG_SET, REG_GET extracts a field: mask first, then shift down.
 * (The original copy mirrored REG_SET and would shift the wrong way.) */
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)	\
	do {				\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
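
/*
 * Example (illustrative; mmFOO is a placeholder register): in WREG32_P
 * the 'mask' names the bits to preserve and 'val' supplies the bits
 * outside the mask, so a read-modify-write of bit 0 looks like:
 *
 *	WREG32_P(mmFOO, 0, ~1);	// clear bit 0, keep bits 31:1
 *	WREG32_OR(mmFOO, 1);	// set bit 0
 *	WREG32_AND(mmFOO, ~1);	// clear bit 0 (same effect as the first line)
 */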
#define WREG32_PLL_P(reg, val, mask)	\
	do {				\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32_PLL(reg, tmp_);		\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)	\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |	\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)	\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
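
/*
 * Worked example (illustrative; FOO/BAR are placeholder register/field
 * names): given generated constants FOO__BAR__SHIFT == 4 and
 * FOO__BAR_MASK == 0xf0,
 *
 *	val = REG_SET_FIELD(val, FOO, BAR, 5);	// bits 7:4 of val become 5
 *	tmp = REG_GET_FIELD(val, FOO, BAR);	// tmp == 5
 */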
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
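
/*
 * Example (illustrative): the RBIOS helpers compose little-endian values
 * byte by byte, so if the BIOS image begins 0x12 0x34 0x56 0x78 then
 *
 *	RBIOS8(0)  == 0x12
 *	RBIOS16(0) == 0x3412
 *	RBIOS32(0) == 0x78563412
 */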
/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
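
/*
 * Usage sketch (illustrative): callers reserve ring space first, then
 * emit one dword at a time; count_dw is the remaining reservation that
 * amdgpu_ring_write() checks above. PACKET3/PACKET3_NOP stand in for the
 * real packet encoding macros defined elsewhere:
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, 0));
 *	amdgpu_ring_commit(ring);
 */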
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	/* bound the check by num_instances, not the array size: the original
	 * AMDGPU_MAX_SDMA_INSTANCES test could hand back an unpopulated
	 * instance when the ring was not found. */
	if (i < adev->sdma.num_instances)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
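
/*
 * Usage sketch (illustrative): SDMA code paths map a ring back to its
 * owning instance, e.g. to honour per-instance quirks such as burst_nop
 * when padding an IB:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		; // emit one burst NOP instead of many single NOPs
 */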
/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))

#define amdgpu_dpm_get_temperature(adev) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
	 (adev)->pm.funcs->get_temperature((adev)))
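
/*
 * The amdgpu_dpm_* wrappers from here on follow the pattern shown by
 * amdgpu_dpm_get_temperature above: when pp_enabled is set they dispatch
 * into the powerplay component via pp_funcs/pp_handle, otherwise they
 * fall back to the legacy per-ASIC table in adev->pm.funcs. Illustrative
 * call site:
 *
 *	int temp = amdgpu_dpm_get_temperature(adev);
 */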
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
	 (adev)->pm.funcs->set_fan_control_mode((adev), (m)))

#define amdgpu_dpm_get_fan_control_mode(adev) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
	 (adev)->pm.funcs->get_fan_control_mode((adev)))

#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	 (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
	 (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))

#define amdgpu_dpm_get_sclk(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->get_sclk((adev), (l)))

#define amdgpu_dpm_get_mclk(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->get_mclk((adev), (l)))

#define amdgpu_dpm_force_performance_level(adev, l) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
	 (adev)->pm.funcs->force_performance_level((adev), (l)))

#define amdgpu_dpm_powergate_uvd(adev, g) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
	 (adev)->pm.funcs->powergate_uvd((adev), (g)))

#define amdgpu_dpm_powergate_vce(adev, g) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
	 (adev)->pm.funcs->powergate_vce((adev), (g)))

#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
	((adev)->pp_enabled ? \
	 (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
	 (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))

#define amdgpu_dpm_get_current_power_state(adev) \
	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_performance_level(adev) \
	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)

#define amdgpu_dpm_get_pp_num_states(adev, data) \
	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)

#define amdgpu_dpm_get_pp_table(adev, table) \
	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)

#define amdgpu_dpm_set_pp_table(adev, buf, size) \
	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)

#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)

#define amdgpu_dpm_force_clock_level(adev, type, level) \
	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)

#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);
bool amdgpu_device_is_px(struct drm_device *dev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"
#endif