/* kfd_priv.h */
  1. /*
  2. * Copyright 2014 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. */
#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <kgd_kfd_interface.h>

/* Mode for KFD sysfs files: world-readable, not writable (0444) */
#define KFD_SYSFS_FILE_MODE 0444
/*
 * When working with cp scheduler we should assign the HIQ manually or via
 * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME queues participates
 * in the cp scheduling taking that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
/* Hard upper bound: every process using its full per-process queue quota */
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
(KFD_MAX_NUM_OF_PROCESSES * \
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

/* Ring size for kernel queues — NOTE(review): presumably bytes, confirm */
#define KFD_KERNEL_QUEUE_SIZE 2048
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * The HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the user mode queues list that are currently running.
 * The DIQ queue is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode user mode queues over subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over
 * subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
 * set the command processor registers and sets the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};

/* Cache coherency policy for queue memory */
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

/* ASIC families supported by this driver */
enum asic_family_type {
	CHIP_KAVERI = 0,
	CHIP_CARRIZO
};
/* Per-ASIC invariant device properties */
struct kfd_device_info {
	unsigned int asic_family;	/* an asic_family_type value — presumably */
	unsigned int max_pasid_bits;	/* PASID width supported by this ASIC */
	size_t ih_ring_entry_size;	/* size of one interrupt-handler ring entry */
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;	/* MQD size incl. alignment padding — TODO confirm units */
};

/* A GPU memory allocation handed out by the GTT sub-allocator */
struct kfd_mem_obj {
	uint32_t range_start;	/* first sub-allocator chunk index */
	uint32_t range_end;	/* last sub-allocator chunk index — confirm inclusive/exclusive */
	uint64_t gpu_addr;	/* GPU address of the allocation */
	uint32_t *cpu_ptr;	/* kernel CPU mapping of the same memory */
};
/* Per-GPU KFD device state */
struct kfd_dev {
	struct kgd_dev *kgd;	/* opaque handle to the owning KGD device */

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	size_t doorbell_process_limit;	/* Number of processes we have doorbell
					 * space for.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					 * page used by kernel queue
					 */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;	/* callbacks into KGD */
	struct mutex doorbell_mutex;	/* presumably guards doorbell_available_index */
	unsigned long doorbell_available_index[DIV_ROUND_UP(
		KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];

	/* GTT sub-allocator state (see kfd_gtt_sa_allocate/free below) */
	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;		/* chunk allocation bitmap */
	struct mutex gtt_sa_lock;	/* presumably serializes sub-allocator access */
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;	/* set once kgd2kfd_device_init has succeeded — TODO confirm */
};
/* KGD2KFD callbacks: entry points invoked by the KGD (graphics) driver */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
		struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
		const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

/* Memory pool kinds (values are ABI — do not renumber) */
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_preempt_type_filter
 *
 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_preempt_type_filter {
	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
	KFD_PREEMPT_TYPE_FILTER_BY_PASID
};

/* Wavefront preemption variant (RESET presumably forces a hard reset) */
enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

/* Packet format used on a queue's ring buffer */
enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @format: The queue packet format (PM4 or AQL).
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f which f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is non active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field updates automatically by the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: This field aim is to notify the H/W of new packet written to
 * the queue ring buffer. This field should be similar to write_ptr and the user
 * should update this field after he updated the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is a interop queue. Interop queue means that the
 * queue can access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
 * of the queue.
 *
 * This structure represents the queue properties for each queue no matter if
 * it's user mode or kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
};
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @sdma_id: SDMA engine/queue identifier — presumably meaningful only for
 * SDMA queues; confirm against the device queue manager.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};
/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX		/* count sentinel, not a real type */
};

/* Resources reported to the H/W scheduler (see pm_send_set_resources()) */
struct scheduling_resources {
	unsigned int vmid_mask;	/* bitmask of VMIDs available to the scheduler */
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};
/* Per-process queue bookkeeping (embedded in struct kfd_process) */
struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	unsigned int num_concurrent_processes;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;	/* tracks allocated queue ids — presumably */
};

/* Per-process-per-device queue/compute-memory (QCM) state */
struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;

	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
};
/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures: GPU virtual address ranges for LDS, GPUVM and scratch */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	bool bound;
};

/* Map an embedded qcm_process_device back to its kfd_process_device */
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	struct mm_struct *mm;

	struct mutex mutex;	/* presumably serializes this process's KFD state */

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Use for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* The process's queues. */
	size_t queue_array_size;

	/* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
	struct kfd_queue **queues;

	unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;
};
/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

/* One entry of the amdkfd ioctl dispatch table */
struct amdkfd_ioctl_desc {
	unsigned int cmd;	/* ioctl command number */
	int flags;
	amdkfd_ioctl_t *func;	/* handler for this command */
	unsigned int cmd_drv;
	const char *name;	/* command name, for debugging/logging */
};
/* Process lifecycle */
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
struct kfd_process *kfd_get_process(const struct task_struct *);

struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
void kfd_doorbell_init(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);

int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

/* MQD manager constructors: generic entry plus per-ASIC variants */
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;		/* user mode queue, or NULL — TODO confirm exclusivity */
	struct kernel_queue *kq;	/* kernel queue, or NULL — TODO confirm exclusivity */
	struct list_head process_queue_list;
};
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int flags,
			enum kfd_queue_type type,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);

/* Packet Manager */

/* Timeout/fence constants — NOTE(review): units defined by the packet
 * manager implementation; confirm before reuse.
 */
#define KFD_HIQ_TIMEOUT (500)
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
#define KFD_UNMAP_LATENCY (150)

/*
 * Builds and submits scheduler packets (set-resources, runlist, query,
 * unmap) through a privileged kernel queue.
 */
struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;	/* kernel queue used for submission */
	struct mutex lock;			/* presumably serializes packet submission */
	bool allocated;				/* is ib_buffer_obj in use? — TODO confirm */
	struct kfd_mem_obj *ib_buffer_obj;	/* indirect buffer backing the runlist */
};

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);

#endif