/* kfd_priv.h */
  1. /*
  2. * Copyright 2014 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. */
  22. #ifndef KFD_PRIV_H_INCLUDED
  23. #define KFD_PRIV_H_INCLUDED
  24. #include <linux/hashtable.h>
  25. #include <linux/mmu_notifier.h>
  26. #include <linux/mutex.h>
  27. #include <linux/types.h>
  28. #include <linux/atomic.h>
  29. #include <linux/workqueue.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/kfd_ioctl.h>
  32. #include <kgd_kfd_interface.h>
/* Mode for the per-device sysfs files exposed by the topology code */
#define KFD_SYSFS_FILE_MODE 0444

/*
 * When working with cp scheduler we should assign the HIQ manually or via
 * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME queues participates
 * in the cp scheduling taking that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Macro for allocating structures (zeroed, typed from the pointer itself) */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

/* Kernel module parameter to specify maximum number of supported processes */
extern int max_num_of_processes;

#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512

/*
 * Kernel module parameter to specify maximum number of supported queues
 * per process
 */
extern int max_num_of_queues_per_process;

#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/* Ring buffer size, in bytes, of kernel queues (HIQ/DIQ) */
#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;
/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * the HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the user mode queues list that are currently running.
 * the DIQ queue is a debugging queue that dispatches debugging commands to the
 * firmware.
 * in this scheduling mode user mode queues over subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but the over
 * subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
 * set the command processor registers and sets the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
/* Cache coherency policy: coherent or non-coherent memory accesses */
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};
/* Per-ASIC constant properties of a KFD-supported device */
struct kfd_device_info {
	unsigned int max_pasid_bits;	/* maximum PASID width, in bits */
	size_t ih_ring_entry_size;	/* size of one IH ring entry, in bytes */
	uint16_t mqd_size_aligned;	/* aligned size of an MQD, in bytes */
};
/* Per-GPU device instance; links the KFD side to the KGD (graphics) driver */
struct kfd_dev {
	struct kgd_dev *kgd;	/* opaque handle into the KGD driver */

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	size_t doorbell_process_limit;	/* Number of processes we have doorbell
					 * space for.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					 * page used by kernel queue
					 */

	struct kgd2kfd_shared_resources shared_resources;

	/* SW interrupt ring buffer and its read/write pointers */
	void *interrupt_ring;
	size_t interrupt_ring_size;
	atomic_t interrupt_ring_rptr;
	atomic_t interrupt_ring_wptr;
	struct work_struct interrupt_work;	/* bottom-half processing */
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
};
/* KGD2KFD callbacks (entry points the graphics driver invokes on KFD) */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

/* Call table into the KGD driver, filled in at probe time */
extern const struct kfd2kgd_calls *kfd2kgd;
/* GPU memory object: buffer-object handle plus its GPU and CPU addresses */
struct kfd_mem_obj {
	void *bo;		/* underlying buffer object handle */
	uint64_t gpu_addr;	/* GPU-visible address of the buffer */
	uint32_t *cpu_ptr;	/* kernel CPU mapping of the buffer */
};
/* Memory pools a KFD buffer may be allocated from */
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};
/* Character device interface (/dev node setup and lookup) */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_preempt_type_filter
 *
 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belongs to
 * specific process.
 *
 */
enum kfd_preempt_type_filter {
	KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
	KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
	KFD_PREEMPT_TYPE_FILTER_BY_PASID
};
/* Wavefront preemption variants (RESET presumably discards wavefront state
 * rather than draining it — confirm against packet manager usage)
 */
enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET
};
/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};
/* Packet format used in a queue's ring buffer: PM4 or AQL */
enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @format: The queue packet format (PM4 or AQL).
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * the priority granularity is from 0 to f which f is the highest priority.
 * currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is non active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field updates automatically by the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: This field aim is to notify the H/W of new packet written to
 * the queue ring buffer. This field should be similar to write_ptr and the user
 * should update this field after he updated the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is a interop queue. Interop queue means that the
 * queue can access both graphics and compute resources.
 *
 * @is_active: Defines if the queue is active or not.
 *
 * @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
 * of the queue.
 *
 * This structure represents the queue properties for each queue no matter if
 * it's user mode or kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	uint32_t __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
};
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies to micro engine id
 * that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	struct kfd_process *process;
	struct kfd_dev *device;
};
/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_CIK_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_CIK_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CIK_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_CIK_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};
/* H/W resources handed to the scheduler via pm_send_set_resources() */
struct scheduling_resources {
	unsigned int vmid_mask;		/* VMIDs available for scheduling */
	enum kfd_queue_type type;
	uint64_t queue_mask;		/* HQD slots usable by the scheduler */
	uint64_t gws_mask;		/* global wave sync resources */
	uint32_t oac_mask;
	uint32_t gds_heap_base;		/* GDS heap for scheduled processes */
	uint32_t gds_heap_size;
};
/* Per-process queue bookkeeping (embedded in struct kfd_process as pqm) */
struct process_queue_manager {
	/* data */
	struct kfd_process *process;		/* owning process */
	unsigned int num_concurrent_processes;
	struct list_head queues;		/* queues owned by this process */
	unsigned long *queue_slot_bitmap;	/* allocation map of queue ids */
};
/* Per-process, per-device queue/compute-memory state managed by the DQM */
struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Device Queue Manager lock */
	struct mutex *lock;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	/* cached SH_MEM_* register values for this process/device */
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
};
/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures (GPU virtual address ranges exposed to this process) */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	bool bound;
};

/* Recover the enclosing kfd_process_device from its embedded qpd member */
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	struct mm_struct *mm;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Use for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* The process's queues. */
	size_t queue_array_size;

	/* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
	struct kfd_queue **queues;

	/* bit i set => queue id i is allocated to this process */
	unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];

	/*Is the user space process 32 bit?*/
	bool is_32bit_user_mode;
};
/* Process lifecycle */
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
struct kfd_process *kfd_get_process(const struct task_struct *);

/* Process <-> device binding */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p,
							int create_pdd);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
void kfd_doorbell_init(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int queue_id);

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);

int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);

struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
/* Process Queue Manager */
struct process_queue_node {
	/* NOTE(review): looks like q and kq are mutually exclusive per node
	 * (user queue vs kernel queue) — confirm against pqm implementation
	 */
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};
/* Process Queue Manager entry points */
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int flags,
			enum kfd_queue_type type,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);

/* Packet Manager */

/* Timeout and fence sentinel values used by the packet manager */
#define KFD_HIQ_TIMEOUT (500)
#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT (10)
#define KFD_UNMAP_LATENCY (150)
/* Builds and submits runlist/fence/unmap packets through the HIQ */
struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;	/* kernel queue used for submission */
	struct mutex lock;
	bool allocated;				/* is ib_buffer_obj in use? */
	struct kfd_mem_obj *ib_buffer_obj;	/* indirect buffer backing store */
};
/* Packet Manager entry points */
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
  514. #endif