kfd_packet_manager.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

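/*
 * Advance the runlist IB write pointer (counted in dwords) by
 * increment_bytes, warning if the new offset runs past the IB buffer.
 */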
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
                        unsigned int buffer_size_bytes)
{
        unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

        WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
             "Runlist IB overflow");
        *wptr = temp;
}

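/*
 * Build a PM4 type-3 packet header for the given opcode. The count field
 * holds packet_size / 4 - 2, i.e. the number of payload dwords minus one.
 */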
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
        union PM4_MES_TYPE_3_HEADER header;

        header.u32All = 0;
        header.opcode = opcode;
        header.count = packet_size / 4 - 2;
        header.type = PM4_TYPE_3;

        return header.u32All;
}

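/*
 * Compute the runlist IB size: one MAP_PROCESS packet per process plus one
 * MAP_QUEUES packet per queue, with extra room for a chained RUN_LIST
 * packet when the runlist is over-subscribed (more processes than fit in a
 * single scheduling quantum, or more compute queues than HWS queue slots).
 */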
static void pm_calc_rlib_size(struct packet_manager *pm,
                                unsigned int *rlib_size,
                                bool *over_subscription)
{
        unsigned int process_count, queue_count, compute_queue_count;
        unsigned int map_queue_size;
        unsigned int max_proc_per_quantum = 1;
        struct kfd_dev *dev = pm->dqm->dev;

        process_count = pm->dqm->processes_count;
        queue_count = pm->dqm->queue_count;
        compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

        /* check if there is over subscription
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc has been done in
         * kgd2kfd_device_init().
         */
        *over_subscription = false;

        if (dev->max_proc_per_quantum > 1)
                max_proc_per_quantum = dev->max_proc_per_quantum;

        if ((process_count > max_proc_per_quantum) ||
            compute_queue_count > get_queues_num(pm->dqm)) {
                *over_subscription = true;
                pr_debug("Over subscribed runlist\n");
        }

        map_queue_size = sizeof(struct pm4_mes_map_queues);
        /* calculate run list ib allocation size */
        *rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
                     queue_count * map_queue_size;

        /*
         * Increase the allocation size in case we need a chained run list
         * when over subscription
         */
        if (*over_subscription)
                *rlib_size += sizeof(struct pm4_mes_runlist);

        pr_debug("runlist ib size %d\n", *rlib_size);
}

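/*
 * Allocate the runlist IB from the device's GTT sub-allocator, zero it and
 * return its CPU and GPU addresses. pm->allocated is set so that
 * pm_release_ib() knows there is a buffer to free.
 */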
static int pm_allocate_runlist_ib(struct packet_manager *pm,
                                unsigned int **rl_buffer,
                                uint64_t *rl_gpu_buffer,
                                unsigned int *rl_buffer_size,
                                bool *is_over_subscription)
{
        int retval;

        if (WARN_ON(pm->allocated))
                return -EINVAL;

        pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

        retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
                                        &pm->ib_buffer_obj);
        if (retval) {
                pr_err("Failed to allocate runlist IB\n");
                return retval;
        }

        *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
        *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

        memset(*rl_buffer, 0, *rl_buffer_size);
        pm->allocated = true;

        return retval;
}

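/*
 * Write a RUN_LIST packet into 'buffer' pointing the scheduler at the
 * runlist IB at GPU address 'ib'. pm_create_runlist_ib() sets 'chain' when
 * closing an over-subscribed runlist, so the packet chains back to the
 * start of the same IB.
 */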
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
        struct pm4_mes_runlist *packet;
        int concurrent_proc_cnt = 0;
        struct kfd_dev *kfd = pm->dqm->dev;

        if (WARN_ON(!ib))
                return -EFAULT;

        /* Determine the number of processes to map together to HW:
         * it can not exceed the number of VMIDs available to the
         * scheduler, and it is determined by the smaller of the number
         * of processes in the runlist and kfd module parameter
         * hws_max_conc_proc.
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc has been done in
         * kgd2kfd_device_init().
         */
        concurrent_proc_cnt = min(pm->dqm->processes_count,
                        kfd->max_proc_per_quantum);

        packet = (struct pm4_mes_runlist *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_runlist));
        packet->header.u32All = build_pm4_header(IT_RUN_LIST,
                                        sizeof(struct pm4_mes_runlist));

        packet->bitfields4.ib_size = ib_size_in_dwords;
        packet->bitfields4.chain = chain ? 1 : 0;
        packet->bitfields4.offload_polling = 0;
        packet->bitfields4.valid = 1;
        packet->bitfields4.process_cnt = concurrent_proc_cnt;
        packet->ordinal2 = lower_32_bits(ib);
        packet->bitfields3.ib_base_hi = upper_32_bits(ib);

        return 0;
}

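/*
 * Write a MAP_PROCESS packet describing one process to the hardware
 * scheduler: its PASID, page table base, shared memory apertures, GDS
 * allocation and queue count (zero when the process is in debug mode).
 */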
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
                                struct qcm_process_device *qpd)
{
        struct pm4_mes_map_process *packet;

        packet = (struct pm4_mes_map_process *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_map_process));

        packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
                                        sizeof(struct pm4_mes_map_process));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 1;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields3.page_table_base = qpd->page_table_base;
        packet->bitfields10.gds_size = qpd->gds_size;
        packet->bitfields10.num_gws = qpd->num_gws;
        packet->bitfields10.num_oac = qpd->num_oac;
        packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
        packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

        /* TODO: scratch support */
        packet->sh_hidden_private_base_vmid = 0;

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        return 0;
}

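/*
 * Write a MAP_QUEUES packet for a single queue. The engine and queue type
 * are derived from q->properties: compute queues may be marked static, DIQ
 * queues use the debug interface type, and SDMA queues select the matching
 * SDMA engine (static mapping is not used for SDMA).
 */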
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
                struct queue *q, bool is_static)
{
        struct pm4_mes_map_queues *packet;
        bool use_static = is_static;

        packet = (struct pm4_mes_map_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

        packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
                                        sizeof(struct pm4_mes_map_queues));
        packet->bitfields2.alloc_format =
                alloc_format__mes_map_queues__one_per_pipe_vi;
        packet->bitfields2.num_queues = 1;
        packet->bitfields2.queue_sel =
                queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

        packet->bitfields2.engine_sel =
                engine_sel__mes_map_queues__compute_vi;
        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_compute_vi;

        switch (q->properties.type) {
        case KFD_QUEUE_TYPE_COMPUTE:
                if (use_static)
                        packet->bitfields2.queue_type =
                                queue_type__mes_map_queues__normal_latency_static_queue_vi;
                break;
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.queue_type =
                        queue_type__mes_map_queues__debug_interface_queue_vi;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
                                engine_sel__mes_map_queues__sdma0_vi;
                use_static = false; /* no static queues under SDMA */
                break;
        default:
                WARN(1, "queue type %d", q->properties.type);
                return -EINVAL;
        }

        packet->bitfields3.doorbell_offset = q->properties.doorbell_off;

        packet->mqd_addr_lo = lower_32_bits(q->gart_mqd_addr);
        packet->mqd_addr_hi = upper_32_bits(q->gart_mqd_addr);

        packet->wptr_addr_lo = lower_32_bits((uint64_t)q->properties.write_ptr);
        packet->wptr_addr_hi = upper_32_bits((uint64_t)q->properties.write_ptr);

        return 0;
}

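/*
 * Build the complete runlist IB: allocate it, then emit a MAP_PROCESS
 * packet for each process followed by MAP_QUEUES packets for its active
 * kernel and user queues. An over-subscribed runlist is closed with a
 * RUN_LIST packet that chains back to the start of the IB.
 */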
static int pm_create_runlist_ib(struct packet_manager *pm,
                                struct list_head *queues,
                                uint64_t *rl_gpu_addr,
                                size_t *rl_size_bytes)
{
        unsigned int alloc_size_bytes;
        unsigned int *rl_buffer, rl_wptr, i;
        int retval, processes_mapped;
        struct device_process_node *cur;
        struct qcm_process_device *qpd;
        struct queue *q;
        struct kernel_queue *kq;
        bool is_over_subscription;

        rl_wptr = retval = processes_mapped = 0;

        retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
                                &alloc_size_bytes, &is_over_subscription);
        if (retval)
                return retval;

        *rl_size_bytes = alloc_size_bytes;
        pm->ib_size_bytes = alloc_size_bytes;

        pr_debug("Building runlist ib process count: %d queues count %d\n",
                pm->dqm->processes_count, pm->dqm->queue_count);

        /* build the run list ib packet */
        list_for_each_entry(cur, queues, list) {
                qpd = cur->qpd;
                /* build map process packet */
                if (processes_mapped >= pm->dqm->processes_count) {
                        pr_debug("Not enough space left in runlist IB\n");
                        pm_release_ib(pm);
                        return -ENOMEM;
                }

                retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
                if (retval)
                        return retval;

                processes_mapped++;
                inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process),
                                alloc_size_bytes);

                list_for_each_entry(kq, &qpd->priv_queue_list, list) {
                        if (!kq->queue->properties.is_active)
                                continue;

                        pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
                                kq->queue->queue, qpd->is_debug);

                        retval = pm_create_map_queue(pm,
                                                &rl_buffer[rl_wptr],
                                                kq->queue,
                                                qpd->is_debug);
                        if (retval)
                                return retval;

                        inc_wptr(&rl_wptr,
                                sizeof(struct pm4_mes_map_queues),
                                alloc_size_bytes);
                }

                list_for_each_entry(q, &qpd->queues_list, list) {
                        if (!q->properties.is_active)
                                continue;

                        pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
                                q->queue, qpd->is_debug);

                        retval = pm_create_map_queue(pm,
                                                &rl_buffer[rl_wptr],
                                                q,
                                                qpd->is_debug);
                        if (retval)
                                return retval;

                        inc_wptr(&rl_wptr,
                                sizeof(struct pm4_mes_map_queues),
                                alloc_size_bytes);
                }
        }

        pr_debug("Finished map process and queues to runlist\n");

        if (is_over_subscription)
                retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
                                        *rl_gpu_addr,
                                        alloc_size_bytes / sizeof(uint32_t),
                                        true);

        for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
                pr_debug("0x%2X ", rl_buffer[i]);
        pr_debug("\n");

        return retval;
}

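/*
 * Initialize the packet manager for a device queue manager: create the HIQ
 * kernel queue used to submit scheduler packets and reset the runlist
 * allocation state.
 */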
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
        pm->dqm = dqm;
        mutex_init(&pm->lock);
        pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
        if (!pm->priv_queue) {
                mutex_destroy(&pm->lock);
                return -ENOMEM;
        }
        pm->allocated = false;

        return 0;
}

void pm_uninit(struct packet_manager *pm)
{
        mutex_destroy(&pm->lock);
        kernel_queue_uninit(pm->priv_queue);
}

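/*
 * Send a SET_RESOURCES packet over the HIQ describing the VMIDs, queue
 * slots, GWS, OAC and GDS heap that the hardware scheduler may hand out.
 */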
int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res)
{
        struct pm4_mes_set_resources *packet;
        int retval = 0;

        mutex_lock(&pm->lock);
        pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        sizeof(*packet) / sizeof(uint32_t),
                                        (unsigned int **)&packet);
        if (!packet) {
                pr_err("Failed to allocate buffer on kernel queue\n");
                retval = -ENOMEM;
                goto out;
        }

        memset(packet, 0, sizeof(struct pm4_mes_set_resources));
        packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
                                        sizeof(struct pm4_mes_set_resources));

        packet->bitfields2.queue_type =
                        queue_type__mes_set_resources__hsa_interface_queue_hiq;
        packet->bitfields2.vmid_mask = res->vmid_mask;
        packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
        packet->bitfields7.oac_mask = res->oac_mask;
        packet->bitfields8.gds_heap_base = res->gds_heap_base;
        packet->bitfields8.gds_heap_size = res->gds_heap_size;

        packet->gws_mask_lo = lower_32_bits(res->gws_mask);
        packet->gws_mask_hi = upper_32_bits(res->gws_mask);

        packet->queue_mask_lo = lower_32_bits(res->queue_mask);
        packet->queue_mask_hi = upper_32_bits(res->queue_mask);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

out:
        mutex_unlock(&pm->lock);

        return retval;
}

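/*
 * Build a runlist IB for the given queue list and submit a RUN_LIST packet
 * over the HIQ that points the scheduler at it. On failure the partially
 * built IB is released.
 */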
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
        uint64_t rl_gpu_ib_addr;
        uint32_t *rl_buffer;
        size_t rl_ib_size, packet_size_dwords;
        int retval;

        retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
                                        &rl_ib_size);
        if (retval)
                goto fail_create_runlist_ib;

        pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

        packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
        mutex_lock(&pm->lock);

        retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
                                        packet_size_dwords, &rl_buffer);
        if (retval)
                goto fail_acquire_packet_buffer;

        retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
                                        rl_ib_size / sizeof(uint32_t), false);
        if (retval)
                goto fail_create_runlist;

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);

        return retval;

fail_create_runlist:
        pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
fail_create_runlist_ib:
        pm_release_ib(pm);
        return retval;
}

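/*
 * Send a QUERY_STATUS packet that has the scheduler write fence_value to
 * fence_address once the request is processed, giving the driver a fence
 * it can wait on.
 */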
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
                        uint32_t fence_value)
{
        int retval;
        struct pm4_mes_query_status *packet;

        if (WARN_ON(!fence_address))
                return -EFAULT;

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
                        (unsigned int **)&packet);
        if (retval)
                goto fail_acquire_packet_buffer;

        packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
                                        sizeof(struct pm4_mes_query_status));

        packet->bitfields2.context_id = 0;
        packet->bitfields2.interrupt_sel =
                        interrupt_sel__mes_query_status__completion_status;
        packet->bitfields2.command =
                        command__mes_query_status__fence_only_after_write_ack;

        packet->addr_hi = upper_32_bits((uint64_t)fence_address);
        packet->addr_lo = lower_32_bits((uint64_t)fence_address);
        packet->data_hi = upper_32_bits((uint64_t)fence_value);
        packet->data_lo = lower_32_bits((uint64_t)fence_value);

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

fail_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}

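/*
 * Send an UNMAP_QUEUES packet asking the scheduler to preempt (or reset,
 * when 'reset' is set) the queues selected by 'filter': a single queue by
 * doorbell offset, all queues of a PASID, all queues, or all non-static
 * queues. For SDMA queues the engine is chosen via 'sdma_engine'.
 */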
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter filter,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine)
{
        int retval;
        uint32_t *buffer;
        struct pm4_mes_unmap_queues *packet;

        mutex_lock(&pm->lock);
        retval = pm->priv_queue->ops.acquire_packet_buffer(
                        pm->priv_queue,
                        sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
                        &buffer);
        if (retval)
                goto err_acquire_packet_buffer;

        packet = (struct pm4_mes_unmap_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
        pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n",
                filter, reset, type);
        packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
                                        sizeof(struct pm4_mes_unmap_queues));
        switch (type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
                break;
        default:
                WARN(1, "queue type %d", type);
                retval = -EINVAL;
                goto err_invalid;
        }

        if (reset)
                packet->bitfields2.action =
                                action__mes_unmap_queues__reset_queues;
        else
                packet->bitfields2.action =
                                action__mes_unmap_queues__preempt_queues;

        switch (filter) {
        case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
                packet->bitfields2.num_queues = 1;
                packet->bitfields3b.doorbell_offset0 = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
                packet->bitfields3a.pasid = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_queues;
                break;
        case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
                /* in this case, we do not preempt static queues */
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
                break;
        default:
                WARN(1, "filter %d", filter);
                retval = -EINVAL;
                goto err_invalid;
        }

        pm->priv_queue->ops.submit_packet(pm->priv_queue);

        mutex_unlock(&pm->lock);
        return 0;

err_invalid:
        pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
        mutex_unlock(&pm->lock);
        return retval;
}

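/*
 * Free the runlist IB allocated by pm_allocate_runlist_ib(), if any, and
 * clear pm->allocated so a new runlist can be built.
 */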
void pm_release_ib(struct packet_manager *pm)
{
        mutex_lock(&pm->lock);
        if (pm->allocated) {
                kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
                pm->allocated = false;
        }
        mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

int pm_debugfs_runlist(struct seq_file *m, void *data)
{
        struct packet_manager *pm = data;

        mutex_lock(&pm->lock);

        if (!pm->allocated) {
                seq_puts(m, " No active runlist\n");
                goto out;
        }

        seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
                     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
        mutex_unlock(&pm->lock);
        return 0;
}

#endif