kfd_device_queue_manager.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
				dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
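
/*
 * allocate_vmid() - grab the first free KFD VMID from dqm->vmid_bitmap,
 * map it to the process' PASID and program the SH_MEM settings for it.
 * Only used without the HW scheduler, where a VMID stays bound to a
 * process for as long as that process owns at least one queue.
 */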
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
				dqm->dev->vm_info.vmid_num_kfd);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
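
/*
 * allocate_hqd() - find a free HQD slot (pipe, queue) for @q, starting at
 * dqm->next_pipe_to_allocate and rotating across the enabled pipes so that
 * queues are spread horizontally over MEC 0.
 */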
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
				q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}
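
/*
 * update_queue() - apply changed queue properties to the MQD. The queue is
 * first unmapped (through the HW scheduler when enabled, otherwise by
 * destroying the HQD directly), the MQD is updated, and the queue is then
 * remapped or reloaded if it is still active. dqm->queue_count is adjusted
 * when the active state changes so the next runlist is sized correctly.
 */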
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
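
/*
 * set_pasid_vmid_mapping() - program the PASID<->VMID mapping through the
 * kfd2kgd interface. A pasid of 0 clears the mapping for @vmid.
 */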
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0; i < get_pipes_per_mec(dqm); i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
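
/*
 * initialize_nocpsch() - set up the bookkeeping used when the HW scheduler
 * is disabled: a per-pipe bitmap of HQD slots available to KFD plus
 * bitmaps of free VMIDs and SDMA queues.
 */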
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
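
/*
 * allocate_sdma_queue() - claim the first free SDMA queue id from
 * dqm->sdma_bitmap; deallocate_sdma_queue() below returns it.
 */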
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
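
/*
 * create_sdma_queue_nocpsch() - allocate an SDMA queue id, derive the SDMA
 * engine and per-engine queue indices from it, then initialize and load
 * the SDMA MQD directly (no runlist is involved without the HW scheduler).
 */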
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */
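
/*
 * set_sched_resources() - tell the HW scheduler which VMIDs and which
 * compute queues (first MEC only) it may use, via pm_send_set_resources().
 */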
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}
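
/*
 * start_cpsch() - bring up the packet manager, hand the scheduling
 * resources to the HW scheduler, allocate the preemption fence in GART
 * memory, enable interrupts and map any queues that already exist.
 */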
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
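
/*
 * create_queue_cpsch() - create a user mode queue under the HW scheduler:
 * allocate an SDMA queue id if needed, initialize the MQD, add the queue
 * to the process' list and, if the queue is active, rebuild the runlist.
 */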
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
		q->properties.sdma_queue_id =
			q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
		q->properties.sdma_engine_id =
			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
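
/*
 * amdkfd_fence_wait_timeout() - poll *fence_addr (yielding with
 * schedule()) until it reaches fence_value or timeout_ms expires;
 * returns -ETIME on timeout. Used to confirm that queue preemption
 * completed after a query-status packet.
 */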
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, we currently do not allow destroying a queue
		 * of a process that is being debugged
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
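
/*
 * set_cache_memory_policy() - record the default and alternate (APE1)
 * cache policies for a process. The APE1 base and limit are validated
 * against the 64K-aligned hardware format before being stored, and the
 * SH_MEM registers are reprogrammed immediately when running without the
 * HW scheduler.
 */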
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}
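
/*
 * process_termination_cpsch() - tear down all queues of a terminating
 * process under the HW scheduler: drop its kernel and user queues from
 * the bookkeeping, unmap them through the scheduler, reset wavefronts if
 * the preemption failed, and finally release the MQDs.
 */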
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			dqm->sdma_queue_count--;

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		qpd->queue_count--;
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
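
/*
 * device_queue_manager_init() - allocate a DQM and wire up its ops table
 * for the configured scheduling policy (HWS or no-HWS) and its asic_ops
 * for the ASIC family, then run the policy-specific initialize().
 */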
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}