amdgpu.h

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,
	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,
	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);
void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);
struct amdgpu_ip_block *amdgpu_get_ip_block(struct amdgpu_device *adev,
					    enum amd_ip_block_type type);
int amdgpu_ip_block_add(struct amdgpu_device *adev,
			const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;
	/* number of dw to reserve per operation */
	unsigned copy_num_dw;
	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;
	/* number of dw to reserve per operation */
	unsigned fill_num_dw;
	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint64_t flags); /* access flags */
	/* enable/disable PRT support */
	void (*set_prt)(struct amdgpu_device *adev, bool enable);
	/* set per-asic pte flags */
	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
				     uint32_t flags);
};
/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);

/*
 * Clocks
 */
#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 KHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};
/*
 * BO.
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint64_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	struct dma_fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head vm_status;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};
#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32 prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	unsigned prime_shared_count;
	/* list of all virtual addresses to which this bo is associated */
	struct list_head va;
	/* Constant after initialization */
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;
	struct amdgpu_bo *shadow;
	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
	struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple, we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the same
 * alignment).  See the illustrative sketch after struct amdgpu_sa_bo below.
 */
#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct dma_fence *fence;
};
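/*
 * Illustrative sketch only (not part of the original header): with "last" as
 * hypothetical shorthand for the highest-offset entry on olist, the
 * "room at the end" test described in the comment above reduces to roughly:
 *
 *	if (sa_manager->size - last->eoffset >= ALIGN(size, align))
 *		new_offset = last->eoffset;	// place the new sub object at the end
 *	else
 *		// wait on flist fences and retire sub objects until one frees
 *		// enough space, as described above
 *
 * The actual allocator is implemented in amdgpu_sa.c.
 */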
/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	struct page **pages;
#endif
	bool ready;

	/* Asic default pte flags */
	uint64_t gart_pte_flags;

	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint64_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
	uint32_t srbm_soft_reset;
	struct amdgpu_mode_mc_save save;
	bool prt_warning;

	/* apertures */
	u64 shared_aperture_start;
	u64 shared_aperture_end;
	u64 private_aperture_start;
	u64 private_aperture_end;
};
/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */
struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	struct dma_fence *excl;
	unsigned shared_count;
	struct dma_fence **shared;
	struct dma_fence_cb cb;
	bool async;
};
/*
 * CP & rings.
 */
struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f);

/*
 * context related structures
 */
struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct dma_fence **fences;
	struct amd_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct dma_fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
	bool preamble_presented;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
/*
 * file private structure
 */
struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *prt_va;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
};
struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
	void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
};

struct amdgpu_kiq {
	u64 eop_gpu_addr;
	struct amdgpu_bo *eop_obj;
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	uint32_t free_mask;
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configuration features */
	uint32_t double_offchip_lds_buf;
};
struct amdgpu_cu_info {
	uint32_t number; /* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
};
struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gfx_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_kiq kiq;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
	bool in_reset;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	void *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct dma_fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved;
	struct amdgpu_bo_list_entry *evictable;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;
};
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means the preamble IB is presented first in its context */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means a context switch occurred */
#define AMDGPU_VM_DOMAIN (1 << 3) /* bit set means in virtual memory context */

struct amdgpu_job {
	struct amd_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_ib *ibs;
	struct dma_fence *fence; /* the hw fence */
	uint32_t preamble_status;
	uint32_t num_ibs;
	void *owner;
	uint64_t fence_ctx; /* the fence_context this job uses */
	bool vm_needs_flush;
	unsigned vm_id;
	uint64_t vm_pd_addr;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;

	/* user fence handling */
	uint64_t uf_addr;
	uint64_t uf_sequence;
};
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}
/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	struct mutex idle_mutex;
	const struct firmware *fw; /* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
	unsigned num_rings;
};
/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src trap_irq_1;
#endif
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
	uint32_t srbm_soft_reset;
};
/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*
 * Debugfs
 */
struct amdgpu_debugfs {
	const struct drm_info_list *files;
	unsigned num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
};
/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);
int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};
/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp acp;
#endif

	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool need_dma32;
	bool accel_working;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	uint8_t *bios;
	uint32_t bios_size;
	struct amdgpu_bo *stollen_vga_memory;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	amdgpu_rreg_t pciep_rreg;
	amdgpu_wreg_t pciep_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t gc_cac_idx_lock;
	amdgpu_rreg_t gc_cac_rreg;
	amdgpu_wreg_t gc_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t vram_usage;
	atomic64_t vram_vis_usage;
	atomic64_t gtt_usage;
	atomic64_t num_bytes_moved;
	atomic64_t num_evictions;
	atomic_t gpu_reset_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t lock;
		s64 last_update_us;
		s64 accum_us; /* accumulated microseconds */
		u32 log2_max_MBps;
	} mm_stats;

	/* display */
	bool enable_virtual_display;
	struct amdgpu_mode_info mode_info;
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	u64 fence_context;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* powerplay */
	struct amd_powerplay powerplay;
	bool pp_enabled;
	bool pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma;

	/* uvd */
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* GDS */
	struct amdgpu_gds gds;

	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
	int num_ip_blocks;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 invisible_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev *kfd;

	struct amdgpu_virt virt;

	/* link all shadow bo */
	struct list_head shadow_list;
	struct mutex shadow_list_lock;
	/* link all gtt */
	spinlock_t gtt_list_lock;
	struct list_head gtt_list;

	/* records whether a hw reset has been performed */
	bool has_hw_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
	do { \
		uint32_t tmp_ = RREG32(reg); \
		tmp_ &= (mask); \
		tmp_ |= ((val) & ~(mask)); \
		WREG32(reg, tmp_); \
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
  1352. #define WREG32_PLL_P(reg, val, mask) \
  1353. do { \
  1354. uint32_t tmp_ = RREG32_PLL(reg); \
  1355. tmp_ &= (mask); \
  1356. tmp_ |= ((val) & ~(mask)); \
  1357. WREG32_PLL(reg, tmp_); \
  1358. } while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
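
/*
 * Illustrative sketch (not part of the original header): a typical
 * read-modify-write of a single register field using the helpers above,
 * assuming hypothetical mmFOO_CNTL register and FOO_CNTL__ENABLE field
 * definitions from the generated register headers:
 *
 *	u32 tmp;
 *
 *	tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * which is equivalent to the single-field shorthand:
 *
 *	WREG32_FIELD(FOO_CNTL, ENABLE, 1);
 *
 * All of these expand against a local "adev" pointer, so they can only be
 * used in functions where such a variable is in scope.
 */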
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
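
/*
 * Illustrative sketch (not part of the original header): the RBIOS* helpers
 * read little-endian values out of the copy of the video BIOS held in
 * adev->bios, e.g. fetching a 16-bit table offset at a hypothetical byte
 * offset "pos":
 *
 *	u16 data_offset = RBIOS16(pos);
 *	u32 signature = RBIOS32(pos + 2);
 *
 * As with the register macros, they assume a local "adev" pointer in scope.
 */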
/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (ring->count_dw < count_dw) {
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	} else {
		occupied = ring->wptr & ring->ptr_mask;
		dst = (void *)&ring->ring[occupied];
		/* copy in up to two chunks so the write can wrap around
		 * the end of the ring buffer */
		chunk1 = ring->ptr_mask + 1 - occupied;
		chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
		chunk2 = count_dw - chunk1;
		/* convert dword counts to bytes for memcpy() */
		chunk1 <<= 2;
		chunk2 <<= 2;

		if (chunk1)
			memcpy(dst, src, chunk1);

		if (chunk2) {
			/* wrapped: continue from the start of the ring */
			src += chunk1;
			dst = (void *)ring->ring;
			memcpy(dst, src, chunk2);
		}

		ring->wptr += count_dw;
		ring->wptr &= ring->ptr_mask;
		ring->count_dw -= count_dw;
	}
}
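
/*
 * Illustrative sketch (not part of the original header): ring space is
 * normally reserved with amdgpu_ring_alloc() (declared in amdgpu_ring.h)
 * before writing, and made visible to the GPU with amdgpu_ring_commit():
 *
 *	r = amdgpu_ring_alloc(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, packet_header);	// hypothetical packet word
 *	amdgpu_ring_write(ring, payload);
 *	amdgpu_ring_commit(ring);
 *
 * "packet_header" and "payload" are placeholders; the exact packet encoding
 * is ASIC specific. The point is the alloc/write/commit pattern guarded by
 * count_dw.
 */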
static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	/* only return an instance whose ring actually matched */
	if (i < adev->sdma.num_instances)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
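
/*
 * Illustrative sketch (not part of the original header): SDMA ring code can
 * use amdgpu_get_sdma_instance() to find the per-instance data for the ring
 * it was handed, e.g. to check the loaded firmware version:
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->fw_version >= SOME_MIN_VERSION)
 *		use_new_packet_format();
 *
 * "SOME_MIN_VERSION" and "use_new_packet_format()" are placeholders used
 * only for the example.
 */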
/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev), (flags))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
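
/*
 * Illustrative sketch (not part of the original header): the indirection
 * macros above hide the per-ASIC/per-IP function tables, so generic code can
 * be written without knowing which generation it runs on, e.g.:
 *
 *	if (amdgpu_ring_test_ring(ring))
 *		DRM_ERROR("ring test failed\n");
 *
 *	xclk = amdgpu_asic_get_xclk(adev);
 *
 * Each macro simply forwards to the corresponding funcs pointer installed by
 * the ASIC/IP specific code at init time.
 */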
/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
bool amdgpu_need_backup(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
#endif
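
/*
 * Illustrative note (not part of the original header): because the
 * !CONFIG_VGA_SWITCHEROO branch above provides empty/false static inline
 * stubs, callers can use these helpers unconditionally, e.g. in the module
 * init/exit paths:
 *
 *	amdgpu_register_atpx_handler();
 *	...
 *	amdgpu_unregister_atpx_handler();
 *
 * without any #ifdef at the call site.
 */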
/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
int amdgpu_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);
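
/*
 * Illustrative sketch (not part of the original header): these KMS entry
 * points are wired into the DRM core from amdgpu_drv.c, roughly:
 *
 *	static struct drm_driver kms_driver = {
 *		.load = amdgpu_driver_load_kms,
 *		.open = amdgpu_driver_open_kms,
 *		.postclose = amdgpu_driver_postclose_kms,
 *		.lastclose = amdgpu_driver_lastclose_kms,
 *		.unload = amdgpu_driver_unload_kms,
 *		.get_vblank_counter = amdgpu_get_vblank_counter_kms,
 *		.enable_vblank = amdgpu_enable_vblank_kms,
 *		.disable_vblank = amdgpu_disable_vblank_kms,
 *		...
 *	};
 *
 * The field list here is abridged; see amdgpu_drv.c for the real table.
 */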
/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
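
/*
 * Illustrative sketch (not part of the original header): HDMI audio code can
 * look up the N/CTS pairs for a given clock and pick the entry matching the
 * stream's sample rate, e.g. for 48 kHz audio:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
 *
 *	program_n_cts(acr.n_48khz, acr.cts_48khz);	// placeholder helper
 *
 * "program_n_cts()" stands in for the encoder-specific register programming.
 */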
/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);

#include "amdgpu_object.h"
#endif