kfd_process.c


/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_dbgmgr.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep);
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);
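
/* Create the workqueue used to release kfd_process structures asynchronously
 * (a no-op if it already exists).
 */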
void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}
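
/*
 * Look up or create the kfd_process for the calling task. Returns an ERR_PTR
 * if the task has no mm or does not use the pthreads threading model (all
 * threads sharing one mm).
 */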
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation so
	 * that two threads of the same process cannot each create a
	 * kfd_process structure.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread, filep);

	mutex_unlock(&kfd_processes_mutex);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}
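
/*
 * Look up a process by its mm_struct. Callers protect the table walk with
 * the kfd_processes SRCU read lock (see find_process() below).
 */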
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}
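
/* Free all per-device data (pdd) structures of a process, including any
 * CWSR trap handler pages allocated for them.
 */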
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
				pdd->dev->id, p->pasid);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd);
	}
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);
	struct kfd_process_device *pdd;

	pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid);

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (pdd->bound == PDD_BOUND)
			amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
	}

	kfd_process_destroy_pdds(p);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}

static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed here because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a device's
	 * debug manager is attached to this process, force unregistration
	 * first so the queues can be destroyed.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};
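
/*
 * For each CWSR-capable device, map the trap handler (TBA/TMA) region into
 * the process address space and copy in the trap handler ISA.
 */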
static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd = NULL;
	struct kfd_dev *dev = NULL;
	struct qcm_process_device *qpd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		dev = pdd->dev;
		qpd = &pdd->qpd;
		if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
			continue;
		offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}
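
/*
 * Allocate and initialize a new kfd_process: PASID and doorbell allocation,
 * MMU notifier registration, hash-table insertion, aperture and CWSR setup.
 * Called with kfd_processes_mutex held (see kfd_create_process()).
 */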
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	err = kfd_process_init_cwsr(process, filep);
	if (err)
		goto err_init_cwsr;

	return process;

err_init_cwsr:
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
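
/* Allocate a per-device data (pdd) structure for @dev and link it into the
 * process's per_device_data list. Returns NULL on allocation failure.
 */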
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	if (pdd->bound == PDD_BOUND) {
		return pdd;
	} else if (unlikely(pdd->bound == PDD_BOUND_SUSPENDED)) {
		pr_err("Binding PDD_BOUND_SUSPENDED pdd is unexpected!\n");
		return ERR_PTR(-EINVAL);
	}

	err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
	if (err < 0)
		return ERR_PTR(err);

	pdd->bound = PDD_BOUND;

	return pdd;
}

/*
 * Bind processes to the device that have been temporarily unbound
 * (PDD_BOUND_SUSPENDED) in kfd_unbind_processes_from_device.
 */
int kfd_bind_processes_to_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;
	int err = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);
		if (pdd->bound != PDD_BOUND_SUSPENDED) {
			mutex_unlock(&p->mutex);
			continue;
		}
		err = amd_iommu_bind_pasid(dev->pdev, p->pasid,
				p->lead_thread);
		if (err < 0) {
			pr_err("Unexpected pasid %d binding failure\n",
					p->pasid);
			mutex_unlock(&p->mutex);
			break;
		}

		pdd->bound = PDD_BOUND;
		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return err;
}

/*
 * Mark currently bound processes as PDD_BOUND_SUSPENDED. These
 * processes will be restored to PDD_BOUND state in
 * kfd_bind_processes_to_device.
 */
void kfd_unbind_processes_from_device(struct kfd_dev *dev)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		mutex_lock(&p->mutex);
		pdd = kfd_get_process_device_data(dev, p);

		if (pdd->bound == PDD_BOUND)
			pdd->bound = PDD_BOUND_SUSPENDED;

		mutex_unlock(&p->mutex);
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);
}
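
/*
 * Handle a PASID being unbound from @dev: detach the debug manager if it is
 * attached to this process, dequeue the process's queues from the device,
 * and drop the process reference taken by the lookup.
 */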
void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	/*
	 * Look for the process that matches the pasid. If there is no such
	 * process, we either released it in amdkfd's own notifier, or there
	 * is a bug. Unfortunately, there is no way to tell...
	 */
	p = kfd_lookup_process_by_pasid(pasid);
	if (!p)
		return;

	pr_debug("Unbinding process %d from IOMMU\n", pasid);

	mutex_lock(kfd_get_dbgmgr_mutex());

	if (dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
		if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
			kfd_dbgmgr_destroy(dev->dbgmgr);
			dev->dbgmgr = NULL;
		}
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (pdd)
		/* For GPUs relying on the IOMMU, we need to dequeue here
		 * while the PASID is still bound.
		 */
		kfd_process_dequeue_from_device(pdd);

	mutex_unlock(&p->mutex);

	kfd_unref_process(p);
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}
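
/*
 * mmap handler for the reserved (CWSR) region: allocate zeroed pages for the
 * per-process CWSR buffer and map them into the user VMA.
 */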
int kfd_reserved_mem_mmap(struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if (!dev)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
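
/* debugfs support: dump the MQDs of every process's queues. */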
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif