kgd_kfd_interface.h

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KFD_INTERFACE_VERSION 2
#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;
struct kgd_mem;

enum kfd_preempt_type {
        KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
        KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
struct kfd_vm_fault_info {
        uint64_t page_addr;
        uint32_t vmid;
        uint32_t mc_id;
        uint32_t status;
        bool prot_valid;
        bool prot_read;
        bool prot_write;
        bool prot_exec;
};
struct kfd_cu_info {
        uint32_t num_shader_engines;
        uint32_t num_shader_arrays_per_engine;
        uint32_t num_cu_per_sh;
        uint32_t cu_active_number;
        uint32_t cu_ao_mask;
        uint32_t simd_per_cu;
        uint32_t max_waves_per_simd;
        uint32_t wave_front_size;
        uint32_t max_scratch_slots_per_cu;
        uint32_t lds_size;
        uint32_t cu_bitmap[4][4];
};
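
/*
 * Illustrative sketch (not part of this interface): deriving an aggregate
 * figure from the fields filled in by get_cu_info() (declared below).
 * The kfd2kgd/kgd variables are hypothetical and the product below is only
 * an upper bound on resident wavefronts.
 *
 *        struct kfd_cu_info cu_info;
 *        uint32_t max_waves;
 *
 *        kfd2kgd->get_cu_info(kgd, &cu_info);
 *        max_waves = cu_info.cu_active_number * cu_info.simd_per_cu *
 *                    cu_info.max_waves_per_simd;
 */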

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
        uint64_t local_mem_size_private;
        uint64_t local_mem_size_public;
        uint32_t vram_width;
        uint32_t mem_clk_max;
};

enum kgd_memory_pool {
        KGD_POOL_SYSTEM_CACHEABLE = 1,
        KGD_POOL_SYSTEM_WRITECOMBINE = 2,
        KGD_POOL_FRAMEBUFFER = 3,
};

enum kgd_engine_type {
        KGD_ENGINE_PFP = 1,
        KGD_ENGINE_ME,
        KGD_ENGINE_CE,
        KGD_ENGINE_MEC1,
        KGD_ENGINE_MEC2,
        KGD_ENGINE_RLC,
        KGD_ENGINE_SDMA1,
        KGD_ENGINE_SDMA2,
        KGD_ENGINE_MAX
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode the firmware schedules the user mode
 * queues and kernel queues such as the HIQ and DIQ.
 * The HIQ is a special queue that dispatches the configuration to the cp
 * and the list of user mode queues that are currently running.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the user mode queue oversubscription feature is
 * enabled (see the illustrative sketch after the enum).
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver sets
 * the command processor registers and programs the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
        KFD_SCHED_POLICY_HWS = 0,
        KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
        KFD_SCHED_POLICY_NO_HWS
};
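
/*
 * Illustrative sketch (not part of this interface): how a driver might
 * branch on the scheduling policy. The sched_policy variable and the
 * helpers named below are hypothetical.
 *
 *        if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
 *                // Program the CP registers directly (e.g. via hqd_load()),
 *                // one HQD slot per queue. Debugging only.
 *                program_queues_manually();
 *        } else {
 *                // Build a runlist and hand it to the firmware scheduler
 *                // through the HIQ; oversubscription depends on the policy.
 *                submit_runlist_to_hiq();
 *        }
 */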

struct kgd2kfd_shared_resources {
        /* Bit n == 1 means VMID n is available for KFD. */
        unsigned int compute_vmid_bitmap;

        /* number of pipes per mec */
        uint32_t num_pipe_per_mec;

        /* number of queues per pipe */
        uint32_t num_queue_per_pipe;

        /* Bit n == 1 means Queue n is available for KFD */
        DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);

        /* Doorbell assignments (SOC15 and later chips only). Only
         * specific doorbells are routed to each SDMA engine. Others
         * are routed to IH and VCN. They are not usable by the CP.
         *
         * Any doorbell number D that satisfies the following condition
         * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
         *
         * KFD currently uses 1024 doorbell slots per process (doorbell
         * indices 0x000-0x3ff). If doorbells 0x0e0-0x0ff and 0x2e0-0x2ff
         * are reserved, the mask would be set to 0x1e0 and the value to
         * 0x0e0 (see the illustrative check after this structure).
         */
        unsigned int sdma_doorbell[2][8];
        unsigned int reserved_doorbell_mask;
        unsigned int reserved_doorbell_val;

        /* Base address of doorbell aperture. */
        phys_addr_t doorbell_physical_address;

        /* Size in bytes of doorbell aperture. */
        size_t doorbell_aperture_size;

        /* Number of bytes at start of aperture reserved for KGD. */
        size_t doorbell_start_offset;

        /* GPUVM address space size in bytes */
        uint64_t gpuvm_size;

        /* Minor device number of the render node */
        int drm_render_minor;
};
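
/*
 * Illustrative sketch (not part of this interface): checking whether a
 * doorbell index is reserved, and counting the queues available to KFD.
 * The function names are hypothetical.
 *
 *        static bool doorbell_is_reserved(
 *                        const struct kgd2kfd_shared_resources *res,
 *                        unsigned int doorbell)
 *        {
 *                return (doorbell & res->reserved_doorbell_mask) ==
 *                        res->reserved_doorbell_val;
 *        }
 *
 *        // With the values from the comment above (mask 0x1e0, val 0x0e0),
 *        // doorbell 0x0e5 is reserved, doorbell 0x105 is not.
 *
 *        static unsigned int num_kfd_queues(
 *                        const struct kgd2kfd_shared_resources *res)
 *        {
 *                return bitmap_weight(res->queue_bitmap, KGD_MAX_QUEUES);
 *        }
 */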

struct tile_config {
        uint32_t *tile_config_ptr;
        uint32_t *macro_tile_config_ptr;
        uint32_t num_tile_configs;
        uint32_t num_macro_tile_configs;

        uint32_t gb_addr_config;
        uint32_t num_banks;
        uint32_t num_ranks;
};
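
/*
 * Illustrative sketch (not part of this interface): consuming the arrays
 * reported by get_tile_config() (declared below). use_tile_mode() and
 * use_macro_tile_mode() are hypothetical placeholders, as are kfd2kgd/kgd.
 *
 *        struct tile_config config;
 *        uint32_t i;
 *
 *        if (!kfd2kgd->get_tile_config(kgd, &config)) {
 *                for (i = 0; i < config.num_tile_configs; i++)
 *                        use_tile_mode(config.tile_config_ptr[i]);
 *                for (i = 0; i < config.num_macro_tile_configs; i++)
 *                        use_macro_tile_mode(config.macro_tile_config_ptr[i]);
 *        }
 */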

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
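
/*
 * Illustrative sketch (not part of this interface): allocation flags are
 * typically combined as one domain bit plus attribute bits when calling
 * alloc_memory_of_gpu() (declared below). The kfd2kgd/kgd/va/size/vm
 * variables are hypothetical and error handling is omitted.
 *
 *        struct kgd_mem *mem;
 *        uint64_t offset;
 *        uint32_t flags = ALLOC_MEM_FLAGS_VRAM |
 *                         ALLOC_MEM_FLAGS_WRITABLE |
 *                         ALLOC_MEM_FLAGS_EXECUTABLE;
 *
 *        kfd2kgd->alloc_memory_of_gpu(kgd, va, size, vm, &mem, &offset,
 *                                     flags);
 */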

/**
 * struct kfd2kgd_calls
 *
 * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
 * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
 *
 * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
 *
 * @get_local_mem_info: Retrieves information about GPU local memory
 *
 * @get_gpu_clock_counter: Retrieves GPU clock counter
 *
 * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
 *
 * @alloc_pasid: Allocate a PASID
 * @free_pasid: Free a PASID
 *
 * @program_sh_mem_settings: A function that should initiate the memory
 * properties such as main aperture memory type (cached / non cached) and
 * secondary aperture base address, size and memory type.
 * This function is used only for no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * for no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only for no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value pairs.
 * Array is allocated with kmalloc, needs to be freed with kfree by caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @get_fw_version: Returns FW versions from the header
 *
 * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
 * Only used for no cp scheduling mode
 *
 * @get_tile_config: Returns GPU-specific tiling mode information
 *
 * @get_cu_info: Retrieves activated cu info
 *
 * @get_vram_usage: Returns current VRAM usage
 *
 * @create_process_vm: Create a VM address space for a given process and GPU
 *
 * @destroy_process_vm: Destroy a VM
 *
 * @get_process_page_dir: Get physical address of a VM page directory
 *
 * @set_vm_context_page_table_base: Program page table base for a VMID
 *
 * @alloc_memory_of_gpu: Allocate GPUVM memory
 *
 * @free_memory_of_gpu: Free GPUVM memory
 *
 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
 * space. Allocates and updates page tables and page directories as
 * needed. This function may return before all page table updates have
 * completed. This allows multiple map operations (on multiple GPUs)
 * to happen concurrently. Use sync_memory to synchronize with all
 * pending updates. (See the illustrative sequence after this structure.)
 *
 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space
 *
 * @sync_memory: Wait for pending page table updates to complete
 *
 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
 * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
 * The kernel virtual address remains valid until the BO is freed.
 *
 * @restore_process_bos: Restore all BOs that belong to the
 * process. This is intended for restoring memory mappings after a TTM
 * eviction.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
 *
 * @submit_ib: Submits an IB to the specified engine by inserting the
 * IB into the corresponding ring (ring type). The IB is executed with the
 * specified VMID in a user mode context.
 *
 * @get_vm_fault_info: Return information about a recent VM fault on
 * GFXv7 and v8. If multiple VM faults occurred since the last call of
 * this function, it will return information about the first of those
 * faults. On GFXv9 VM fault information is fully contained in the IH
 * packet and this function is not needed.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @gpu_recover: Lets kgd reset the gpu after kfd detects a CPC hang
 *
 * @set_compute_idle: Indicates that compute is idle on a device. This
 * can be used to change power profiles depending on compute activity.
 *
 * @get_hive_id: Returns the hive id of the current device, or 0 if XGMI is
 * not enabled
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 */
struct kfd2kgd_calls {
        int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
                        void **cpu_ptr, bool mqd_gfx9);

        void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);

        void (*get_local_mem_info)(struct kgd_dev *kgd,
                        struct kfd_local_mem_info *mem_info);

        uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);

        uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);

        int (*alloc_pasid)(unsigned int bits);
        void (*free_pasid)(unsigned int pasid);

        /* Register access functions */
        void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
                        uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
                        uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

        int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
                        unsigned int vmid);

        int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

        int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                        uint32_t queue_id, uint32_t __user *wptr,
                        uint32_t wptr_shift, uint32_t wptr_mask,
                        struct mm_struct *mm);

        int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
                        uint32_t __user *wptr, struct mm_struct *mm);

        int (*hqd_dump)(struct kgd_dev *kgd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs);

        int (*hqd_sdma_dump)(struct kgd_dev *kgd,
                        uint32_t engine_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs);

        bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
                        uint32_t pipe_id, uint32_t queue_id);

        int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
                        unsigned int timeout, uint32_t pipe_id,
                        uint32_t queue_id);

        bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

        int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
                        unsigned int timeout);

        int (*address_watch_disable)(struct kgd_dev *kgd);
        int (*address_watch_execute)(struct kgd_dev *kgd,
                        unsigned int watch_point_id,
                        uint32_t cntl_val,
                        uint32_t addr_hi,
                        uint32_t addr_lo);
        int (*wave_control_execute)(struct kgd_dev *kgd,
                        uint32_t gfx_index_val,
                        uint32_t sq_cmd);
        uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
                        unsigned int watch_point_id,
                        unsigned int reg_offset);
        bool (*get_atc_vmid_pasid_mapping_valid)(struct kgd_dev *kgd,
                        uint8_t vmid);
        uint16_t (*get_atc_vmid_pasid_mapping_pasid)(struct kgd_dev *kgd,
                        uint8_t vmid);

        uint16_t (*get_fw_version)(struct kgd_dev *kgd,
                        enum kgd_engine_type type);
        void (*set_scratch_backing_va)(struct kgd_dev *kgd,
                        uint64_t va, uint32_t vmid);
        int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

        void (*get_cu_info)(struct kgd_dev *kgd,
                        struct kfd_cu_info *cu_info);
        uint64_t (*get_vram_usage)(struct kgd_dev *kgd);

        int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid,
                        void **vm, void **process_info,
                        struct dma_fence **ef);
        int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
                        unsigned int pasid, void **vm, void **process_info,
                        struct dma_fence **ef);
        void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
        void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
        uint64_t (*get_process_page_dir)(void *vm);
        void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base);
        int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
                        uint64_t size, void *vm,
                        struct kgd_mem **mem, uint64_t *offset,
                        uint32_t flags);
        int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
        int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
                        void *vm);
        int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
                        void *vm);
        int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
        int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
                        void **kptr, uint64_t *size);
        int (*restore_process_bos)(void *process_info, struct dma_fence **ef);

        int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
        int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);

        int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
                        uint32_t vmid, uint64_t gpu_addr,
                        uint32_t *ib_cmd, uint32_t ib_len);

        int (*get_vm_fault_info)(struct kgd_dev *kgd,
                        struct kfd_vm_fault_info *info);
        uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);

        void (*gpu_recover)(struct kgd_dev *kgd);

        void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);

        uint64_t (*get_hive_id)(struct kgd_dev *kgd);
};
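
/*
 * Illustrative sketch (not part of this interface): the GPUVM mapping
 * sequence described for @map_memory_to_gpu above. map_memory_to_gpu()
 * may return before the page table updates finish, so the caller
 * synchronizes explicitly with sync_memory(). Error handling is omitted
 * and the kfd2kgd/kgd/va/size/vm variables are hypothetical.
 *
 *        struct kgd_mem *mem;
 *        uint64_t offset;
 *
 *        kfd2kgd->alloc_memory_of_gpu(kgd, va, size, vm, &mem, &offset,
 *                                     ALLOC_MEM_FLAGS_GTT |
 *                                     ALLOC_MEM_FLAGS_WRITABLE);
 *        kfd2kgd->map_memory_to_gpu(kgd, mem, vm);   // may return early
 *        kfd2kgd->sync_memory(kgd, mem, true);       // wait for updates
 *
 * Similarly for @hqd_dump, the callee kmallocs the array of address-value
 * pairs and the caller owns (and must kfree) it:
 *
 *        uint32_t (*dump)[2];
 *        uint32_t n_regs, i;
 *
 *        if (!kfd2kgd->hqd_dump(kgd, pipe_id, queue_id, &dump, &n_regs)) {
 *                for (i = 0; i < n_regs; i++)
 *                        pr_debug("reg 0x%x = 0x%x\n",
 *                                 dump[i][0], dump[i][1]);
 *                kfree(dump);
 *        }
 */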

/**
 * struct kgd2kfd_calls
 *
 * @exit: Notifies amdkfd that the kgd module is unloaded
 *
 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
 *
 * @device_init: Initialize the newly probed device (if it is a device that
 * amdkfd supports)
 *
 * @device_exit: Notifies amdkfd about a removal of a kgd device
 *
 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
 *
 * @resume: Notifies amdkfd about a resume action done to a kgd device
 *
 * @quiesce_mm: Quiesce all user queue access to specified MM address space
 *
 * @resume_mm: Resume user queue access to specified MM address space
 *
 * @schedule_evict_and_restore_process: Schedules a work queue that will
 * prepare for safe eviction of KFD BOs that belong to the specified process.
 *
 * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
 *
 * @post_reset: Notifies amdkfd that amdgpu has successfully reset the gpu
 *
 * This structure contains function callback pointers through which the kgd
 * driver notifies amdkfd about certain status changes.
 */
struct kgd2kfd_calls {
        void (*exit)(void);
        struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
                        const struct kfd2kgd_calls *f2g);
        bool (*device_init)(struct kfd_dev *kfd,
                        const struct kgd2kfd_shared_resources *gpu_resources);
        void (*device_exit)(struct kfd_dev *kfd);
        void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
        void (*suspend)(struct kfd_dev *kfd);
        int (*resume)(struct kfd_dev *kfd);
        int (*quiesce_mm)(struct mm_struct *mm);
        int (*resume_mm)(struct mm_struct *mm);
        int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
                        struct dma_fence *fence);
        int (*pre_reset)(struct kfd_dev *kfd);
        int (*post_reset)(struct kfd_dev *kfd);
};

int kgd2kfd_init(unsigned interface_version,
                const struct kgd2kfd_calls **g2f);
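
/*
 * Illustrative sketch (not part of this interface): how a kgd driver might
 * obtain the callback table and bring a device up. Error handling is
 * omitted; kgd, pdev, kfd2kgd_ops and shared_resources are hypothetical
 * placeholders for driver-owned objects.
 *
 *        const struct kgd2kfd_calls *kgd2kfd;
 *        struct kfd_dev *kfd;
 *
 *        if (kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd))
 *                return;                 // amdkfd not available
 *
 *        kfd = kgd2kfd->probe(kgd, pdev, kfd2kgd_ops);
 *        if (kfd && kgd2kfd->device_init(kfd, &shared_resources)) {
 *                // Device is up: forward IH entries via kgd2kfd->interrupt()
 *                // and call suspend()/resume() around power transitions.
 *        }
 */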

#endif /* KGD_KFD_INTERFACE_H_INCLUDED */