kfd_process.c

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
                                        struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
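
/* kfd_process_create_wq - Allocate the module-wide workqueues: a
 * general one for process-teardown work and the ordered restore
 * workqueue described above. Returns 0 on success or -ENOMEM if
 * either allocation fails; a partial allocation is cleaned up.
 */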
int kfd_process_create_wq(void)
{
        if (!kfd_process_wq)
                kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
        if (!kfd_restore_wq)
                kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

        if (!kfd_process_wq || !kfd_restore_wq) {
                kfd_process_destroy_wq();
                return -ENOMEM;
        }

        return 0;
}

void kfd_process_destroy_wq(void)
{
        if (kfd_process_wq) {
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
        if (kfd_restore_wq) {
                destroy_workqueue(kfd_restore_wq);
                kfd_restore_wq = NULL;
        }
}
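
/* kfd_process_free_gpuvm - Unmap a BO from the process-device's GPU VM
 * and free its backing memory through the kfd2kgd interface.
 */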
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
                                   struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;

        dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
        dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should only be called right after the process
 * is created and while kfd_processes_mutex is still being held,
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
                                   uint64_t gpu_va, uint32_t size,
                                   uint32_t flags, void **kptr)
{
        struct kfd_dev *kdev = pdd->dev;
        struct kgd_mem *mem = NULL;
        int handle;
        int err;

        err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
                                                 pdd->vm, &mem, NULL, flags);
        if (err)
                goto err_alloc_mem;

        err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
        if (err)
                goto err_map_mem;

        err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
        }

        /* Create an obj handle so kfd_process_device_remove_obj_handle
         * will take care of the bo removal when the process finishes.
         * We do not need to take p->mutex, because the process is just
         * created and the ioctls have not had the chance to run.
         */
        handle = kfd_process_device_create_obj_handle(pdd, mem);

        if (handle < 0) {
                err = handle;
                goto free_gpuvm;
        }

        if (kptr) {
                err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
                                (struct kgd_mem *)mem, kptr, NULL);
                if (err) {
                        pr_debug("Map GTT BO to kernel failed\n");
                        goto free_obj_handle;
                }
        }

        return err;

free_obj_handle:
        kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
        kfd_process_free_gpuvm(mem, pdd);
        return err;

err_map_mem:
        kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
        /* kptr is optional (see the check above), so only clear it if
         * the caller supplied one.
         */
        if (kptr)
                *kptr = NULL;
        return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from the kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                         ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
                         ALLOC_MEM_FLAGS_WRITABLE |
                         ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (qpd->ib_kaddr || !qpd->ib_base)
                return 0;

        /* ib_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
                                      &kaddr);
        if (ret)
                return ret;

        qpd->ib_kaddr = kaddr;

        return 0;
}
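
/* kfd_create_process - Find or create the kfd_process for the calling
 * task. Called when a process opens /dev/kfd; holds kfd_processes_mutex
 * across lookup and creation so that two threads of the same process
 * cannot race and create two kfd_process structures.
 */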
struct kfd_process *kfd_create_process(struct file *filep)
{
        struct kfd_process *process;
        struct task_struct *thread = current;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        /*
         * Take the kfd processes mutex before starting process creation
         * so there won't be a case where two threads of the same process
         * create two kfd_process structures.
         */
        mutex_lock(&kfd_processes_mutex);

        /* A prior open of /dev/kfd could have already created the process. */
        process = find_process(thread);
        if (process)
                pr_debug("Process already found\n");
        else
                process = create_process(thread, filep);

        mutex_unlock(&kfd_processes_mutex);

        return process;
}
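
/* kfd_get_process - Look up the kfd_process for an existing task
 * without creating one. Does not take a reference on the process.
 */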
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
        struct kfd_process *process;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        process = find_process(thread);

        return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *process;

        hash_for_each_possible_rcu(kfd_processes_table, process,
                                   kfd_processes, (uintptr_t)mm)
                if (process->mm == mm)
                        return process;

        return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
        struct kfd_process *p;
        int idx;

        idx = srcu_read_lock(&kfd_processes_srcu);
        p = find_process_by_mm(thread->mm);
        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

void kfd_unref_process(struct kfd_process *p)
{
        kref_put(&p->ref, kfd_process_ref_release);
}
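
/* kfd_process_device_free_bos - Free all buffer objects owned by a
 * process-device: unmap each BO from every peer device's VM, free the
 * backing memory and drop the idr handle.
 */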
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;

        /*
         * Remove all handles from idr and release appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
                struct kfd_process_device *peer_pdd;

                list_for_each_entry(peer_pdd, &p->per_device_data,
                                    per_device_list) {
                        if (!peer_pdd->vm)
                                continue;
                        peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }

                pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                kfd_process_device_free_bos(pdd);
}
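
/* kfd_process_destroy_pdds - Tear down all per-device data of a dying
 * process: release the DRM file or destroy the VM, free the CWSR pages
 * allocated for APUs, and destroy each pdd's handle idr.
 */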
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
        struct kfd_process_device *pdd, *temp;

        list_for_each_entry_safe(pdd, temp, &p->per_device_data,
                                 per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
                         pdd->dev->id, p->pasid);

                if (pdd->drm_file)
                        fput(pdd->drm_file);
                else if (pdd->vm)
                        pdd->dev->kfd2kgd->destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);

                list_del(&pdd->per_device_list);

                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                get_order(KFD_CWSR_TBA_TMA_SIZE));

                idr_destroy(&pdd->alloc_idr);

                kfree(pdd);
        }
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);

        kfd_iommu_unbind_process(p);

        kfd_process_free_outstanding_kfd_bos(p);

        kfd_process_destroy_pdds(p);
        dma_fence_put(p->ef);

        kfd_event_free_process(p);

        kfd_pasid_free(p->pasid);
        kfd_free_process_doorbells(p);

        mutex_destroy(&p->mutex);

        put_task_struct(p->lead_thread);

        kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
        struct kfd_process *p = container_of(ref, struct kfd_process, ref);

        INIT_WORK(&p->release_work, kfd_process_wq_release);
        queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
        struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

        kfd_unref_process(p);
}
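
/* kfd_process_notifier_release - MMU notifier .release callback,
 * invoked when the process address space is torn down. Removes the
 * process from the hash table, cancels eviction/restore work, destroys
 * its queues and drops the hash table reference via an RCU callback.
 */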
static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
{
        struct kfd_process *p;
        struct kfd_process_device *pdd = NULL;

        /*
         * The kfd_process structure cannot be freed because the
         * mmu_notifier srcu is read locked
         */
        p = container_of(mn, struct kfd_process, mmu_notifier);
        if (WARN_ON(p->mm != mm))
                return;

        mutex_lock(&kfd_processes_mutex);
        hash_del_rcu(&p->kfd_processes);
        mutex_unlock(&kfd_processes_mutex);
        synchronize_srcu(&kfd_processes_srcu);

        cancel_delayed_work_sync(&p->eviction_work);
        cancel_delayed_work_sync(&p->restore_work);

        mutex_lock(&p->mutex);

        /* Iterate over all process device data structures and if the
         * pdd is in debug mode, we should first force unregistration,
         * then we will be able to destroy the queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;

                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
                        if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
                                kfd_dbgmgr_destroy(dev->dbgmgr);
                                dev->dbgmgr = NULL;
                        }
                }
                mutex_unlock(kfd_get_dbgmgr_mutex());
        }

        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);

        /* Indicate to other users that MM is no longer valid */
        p->mm = NULL;

        mutex_unlock(&p->mutex);

        mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
        mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
        .release = kfd_process_notifier_release,
};
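
/* kfd_process_init_cwsr_apu - Set up the CWSR (compute wave
 * save/restore) trap handler for APU devices by mmapping the reserved
 * per-device region into the process address space and copying the
 * trap handler ISA into it. dGPUs use
 * kfd_process_device_init_cwsr_dgpu below instead.
 */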
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
        unsigned long offset;
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;
                struct qcm_process_device *qpd = &pdd->qpd;

                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;

                offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
                qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
                        KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
                        MAP_SHARED, offset);

                if (IS_ERR_VALUE(qpd->tba_addr)) {
                        int err = qpd->tba_addr;

                        pr_err("Failure to set tba address. error %d.\n", err);
                        qpd->tba_addr = 0;
                        qpd->cwsr_kaddr = NULL;
                        return err;
                }

                memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

                qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
                pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                         qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
        }

        return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
                return 0;

        /* cwsr_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
                                      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
        if (ret)
                return ret;

        qpd->cwsr_kaddr = kaddr;
        qpd->tba_addr = qpd->cwsr_base;

        memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

        qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
        pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
                 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

        return 0;
}
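
/* create_process - Allocate and initialize a new kfd_process: PASID,
 * doorbells, MMU notifier registration, hash table insertion, event
 * and queue manager init, apertures and CWSR setup. The caller must
 * hold kfd_processes_mutex.
 */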
static struct kfd_process *create_process(const struct task_struct *thread,
                                        struct file *filep)
{
        struct kfd_process *process;
        int err = -ENOMEM;

        process = kzalloc(sizeof(*process), GFP_KERNEL);
        if (!process)
                goto err_alloc_process;

        process->pasid = kfd_pasid_alloc();
        if (process->pasid == 0)
                goto err_alloc_pasid;

        if (kfd_alloc_process_doorbells(process) < 0)
                goto err_alloc_doorbells;

        kref_init(&process->ref);

        mutex_init(&process->mutex);

        process->mm = thread->mm;

        /* register notifier */
        process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
        err = mmu_notifier_register(&process->mmu_notifier, process->mm);
        if (err)
                goto err_mmu_notifier;

        hash_add_rcu(kfd_processes_table, &process->kfd_processes,
                        (uintptr_t)process->mm);

        process->lead_thread = thread->group_leader;
        get_task_struct(process->lead_thread);

        INIT_LIST_HEAD(&process->per_device_data);

        kfd_event_init_process(process);

        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;

        /* init process apertures */
        process->is_32bit_user_mode = in_compat_syscall();
        err = kfd_init_apertures(process);
        if (err != 0)
                goto err_init_apertures;

        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();

        err = kfd_process_init_cwsr_apu(process, filep);
        if (err)
                goto err_init_cwsr;

        return process;

err_init_cwsr:
        kfd_process_free_outstanding_kfd_bos(process);
        kfd_process_destroy_pdds(process);
err_init_apertures:
        pqm_uninit(&process->pqm);
err_process_pqm_init:
        hash_del_rcu(&process->kfd_processes);
        synchronize_rcu();
        mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
        mutex_destroy(&process->mutex);
        kfd_free_process_doorbells(process);
err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
err_alloc_pasid:
        kfree(process);
err_alloc_process:
        return ERR_PTR(err);
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                if (pdd->dev == dev)
                        return pdd;

        return NULL;
}
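
/* kfd_create_process_device_data - Allocate and initialize the
 * per-device data (pdd) that links a process to one device, and add it
 * to the process's per_device_data list.
 */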
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                          struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;

        pdd->dev = dev;
        INIT_LIST_HEAD(&pdd->qpd.queues_list);
        INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        list_add(&pdd->per_device_list, &p->per_device_data);

        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);

        return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file)
{
        struct kfd_process *p;
        struct kfd_dev *dev;
        int ret;

        if (pdd->vm)
                return drm_file ? -EBUSY : 0;

        p = pdd->process;
        dev = pdd->dev;

        if (drm_file)
                ret = dev->kfd2kgd->acquire_process_vm(
                        dev->kgd, drm_file,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
                ret = dev->kfd2kgd->create_process_vm(
                        dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }

        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
                goto err_reserve_ib_mem;
        ret = kfd_process_device_init_cwsr_dgpu(pdd);
        if (ret)
                goto err_init_cwsr;

        pdd->drm_file = drm_file;

        return 0;

err_init_cwsr:
err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
                dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;

        return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                      struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int err;

        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                return ERR_PTR(-ENOMEM);
        }

        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
                return ERR_PTR(err);

        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
                return ERR_PTR(err);

        return pdd;
}

struct kfd_process_device *kfd_get_first_process_device_data(
                                                struct kfd_process *p)
{
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd)
{
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
                return NULL;
        return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
        return !(list_empty(&p->per_device_data));
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                         void *mem)
{
        return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle < 0)
                return NULL;

        return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                          int handle)
{
        if (handle >= 0)
                idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p, *ret_p = NULL;
        unsigned int temp;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (p->pasid == pasid) {
                        kref_get(&p->ref);
                        ret_p = p;
                        break;
                }
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *p;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        p = find_process_by_mm(mm);
        if (p)
                kref_get(&p->ref);

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

/* process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
static int process_evict_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r = 0;
        unsigned int n_evicted = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
                        pr_err("Failed to evict process queues\n");
                        goto fail;
                }
                n_evicted++;
        }

        return r;

fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
                        pr_err("Failed to restore queues\n");

                n_evicted--;
        }

        return r;
}

/* process_restore_queues - Restore all user queues of a process */
static int process_restore_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r, ret = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
                        pr_err("Failed to restore process queues\n");
                        if (!ret)
                                ret = r;
                }
        }

        return ret;
}
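
/* evict_process_worker - Delayed-work handler that evicts all queues
 * of a process, signals its eviction fence, and schedules the restore
 * work to run after PROCESS_RESTORE_TIME_MS.
 */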
static void evict_process_worker(struct work_struct *work)
{
        int ret;
        struct kfd_process *p;
        struct delayed_work *dwork;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, eviction_work);
        WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
                  "Eviction fence mismatch\n");

        /* A narrow window of overlap between the restore and evict work
         * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
         * unreserves KFD BOs, the process can be evicted again before the
         * restore has finished its remaining steps. So wait for any
         * previous restore work to complete.
         */
        flush_delayed_work(&p->restore_work);

        pr_debug("Started evicting pasid %d\n", p->pasid);
        ret = process_evict_queues(p);
        if (!ret) {
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
                queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

                pr_debug("Finished evicting pasid %d\n", p->pasid);
        } else
                pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}
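
/* restore_process_worker - Delayed-work handler that restores a
 * previously evicted process: revalidates its BOs through KGD and
 * restores its queues, rescheduling itself with a back-off delay if
 * memory is not yet available.
 */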
static void restore_process_worker(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct kfd_process *p;
        struct kfd_process_device *pdd;
        int ret = 0;

        dwork = to_delayed_work(work);

        /* Process termination destroys this worker thread. So during the
         * lifetime of this thread, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, restore_work);

        /* Call restore_process_bos on the first KGD device. This function
         * takes care of restoring the whole process including other devices.
         * Restore can fail if not enough memory is available; if so,
         * it is rescheduled and tried again later.
         */
        pdd = list_first_entry(&p->per_device_data,
                               struct kfd_process_device,
                               per_device_list);

        pr_debug("Started restoring pasid %d\n", p->pasid);

        /* Set last_restore_timestamp before attempting the restore.
         * Otherwise it would have to be set by KGD (restore_process_bos)
         * before KFD BOs are unreserved; if not, the process could be
         * evicted again before the timestamp is set.
         * If the restore fails, the timestamp will be set again in the
         * next attempt. This means the minimum GPU quantum would be
         * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
         * following two functions.
         */
        p->last_restore_timestamp = get_jiffies_64();
        ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
                                                     &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
                ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
                WARN(!ret, "reschedule restore work failed\n");
                return;
        }

        ret = process_restore_queues(p);
        if (!ret)
                pr_debug("Finished restoring pasid %d\n", p->pasid);
        else
                pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}
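
/* kfd_suspend_all_processes - Evict the queues of every known process
 * and signal their eviction fences. Intended for device-level suspend
 * paths; kfd_resume_all_processes schedules the corresponding restores.
 */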
void kfd_suspend_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);

                if (process_evict_queues(p))
                        pr_err("Failed to suspend process %d\n", p->pasid);
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
                        pr_err("Restore process %d failed during resume\n",
                               p->pasid);
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
        return ret;
}
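
/* kfd_reserved_mem_mmap - mmap handler for the per-process reserved
 * region that holds the CWSR trap handler (TBA/TMA) buffer on APUs:
 * allocates zeroed pages and maps them into the user VMA.
 */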
int kfd_reserved_mem_mmap(struct kfd_process *process,
                          struct vm_area_struct *vma)
{
        struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
        struct kfd_process_device *pdd;
        struct qcm_process_device *qpd;

        if (!dev)
                return -EINVAL;
        if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
                pr_err("Incorrect CWSR mapping size.\n");
                return -EINVAL;
        }

        pdd = kfd_get_process_device_data(dev, process);
        if (!pdd)
                return -EINVAL;
        qpd = &pdd->qpd;

        qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(KFD_CWSR_TBA_TMA_SIZE));
        if (!qpd->cwsr_kaddr) {
                pr_err("Error allocating per process CWSR buffer.\n");
                return -ENOMEM;
        }

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
                | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
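
/* kfd_flush_tlb - Invalidate GPU TLB entries for this process-device:
 * by VMID when running without HWS (VMIDs are managed by the driver),
 * otherwise by PASID.
 */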
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
                        f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
        } else {
                f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
        }
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
        struct kfd_process *p;
        unsigned int temp;
        int r = 0;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                seq_printf(m, "Process %d PASID %d:\n",
                           p->lead_thread->tgid, p->pasid);

                mutex_lock(&p->mutex);
                r = pqm_debugfs_mqds(m, &p->pqm);
                mutex_unlock(&p->mutex);

                if (r)
                        break;
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return r;
}

#endif