/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

#define TTM_BO_VM_NUM_PREFAULT 16
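
/*
 * ttm_bo_vm_fault_idle() - wait for a pipelined move of @bo to finish
 * before servicing a fault on it.
 *
 * Called with the bo reserved. Returns 0 once bo->moving has signaled and
 * been cleared, VM_FAULT_RETRY when the wait should happen without
 * mmap_sem held (in which case the bo has also been unreserved, unless
 * FAULT_FLAG_RETRY_NOWAIT was set), or VM_FAULT_SIGBUS / VM_FAULT_NOPAGE
 * if the fence wait fails.
 */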
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_fault *vmf)
{
	int ret = 0;

	if (likely(!bo->moving))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_fence_is_signaled(bo->moving))
		goto out_clear;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		ttm_bo_reference(bo);
		up_read(&vmf->vma->vm_mm->mmap_sem);
		(void) dma_fence_wait(bo->moving, true);
		ttm_bo_unreserve(bo);
		ttm_bo_unref(&bo);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.
	 */
	ret = dma_fence_wait(bo->moving, true);
	if (unlikely(ret != 0)) {
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
		goto out_unlock;
	}

out_clear:
	dma_fence_put(bo->moving);
	bo->moving = NULL;

out_unlock:
	return ret;
}
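
/*
 * ttm_bo_io_mem_pfn() - return the pfn backing @page_offset of an
 * io-mapped bo, using the driver's io_mem_pfn() callback when one is
 * provided and falling back to a linear bus base + offset mapping
 * otherwise.
 */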
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_bo_device *bdev = bo->bdev;

	if (bdev->driver->io_mem_pfn)
		return bdev->driver->io_mem_pfn(bo, page_offset);

	return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
		+ page_offset;
}
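
/*
 * ttm_bo_vm_fault() - .fault handler for TTM mappings.
 *
 * Trylocks the bo reservation to avoid deadlocking against the
 * mmap_sem -> bo::reserve locking order, waits for any pipelined move to
 * finish, inserts the PTE for the faulting address and then speculatively
 * prefaults up to TTM_BO_VM_NUM_PREFAULT further pages.
 */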
static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = vmf->address;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_reference(bo);
				up_read(&vmf->vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
				ttm_bo_unref(&bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we wanted to change the locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			ret = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0)) {
		if (ret == VM_FAULT_RETRY &&
		    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/* The BO has already been unreserved. */
			return ret;
		}

		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		ret = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false,
			.flags = TTM_OPT_FLAG_FORCE_ALLOC
		};

		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all pages at once, the most common usage */
		if (ttm_tt_populate(ttm, &ctx)) {
			ret = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem) {
			/* Iomem should not be marked encrypted */
			cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				ret = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				break;
			}
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE, we prefaulted into an
		 * already populated PTE, or prefaulting hit an error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			ret =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	ret = VM_FAULT_NOPAGE;
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return ret;
}
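
/*
 * ttm_bo_vm_open() / ttm_bo_vm_close() - keep a bo reference for the
 * lifetime of each vma that maps it.
 */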
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo =
	    (struct ttm_buffer_object *)vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	(void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

	ttm_bo_unref(&bo);
	vma->vm_private_data = NULL;
}
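
/*
 * ttm_bo_vm_access_kmap() - helper for ttm_bo_vm_access(): copy between
 * @buf and a kmap'able bo one page at a time, so that no long-lived
 * kernel virtual mapping of the whole object is needed. Returns @len on
 * success or a negative error code.
 */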
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}
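
/*
 * ttm_bo_vm_access() - .access handler, used e.g. by ptrace and
 * /proc/<pid>/mem to peek and poke into TTM mappings. System and TT
 * placements are accessed through a kmap; other placements are handed to
 * the driver's access_memory() callback, if any.
 */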
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
			    void *buf, int len, int write)
{
	unsigned long offset = (addr) - vma->vm_start;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->mem.mem_type) {
	case TTM_PL_SYSTEM:
		if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
			ret = ttm_tt_swapin(bo->ttm);
			if (unlikely(ret != 0))
				goto out; /* unreserve before returning */
		}
		/* fall through */
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->driver->access_memory)
			ret = bo->bdev->driver->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

out:
	ttm_bo_unreserve(bo);

	return ret;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};
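
/*
 * ttm_bo_vm_lookup() - resolve a mmap offset range to the bo backing it,
 * returning a new reference on success, or NULL if no node matches or if
 * the bo is already on its way to destruction.
 */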
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
						  unsigned long offset,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct ttm_buffer_object *bo = NULL;

	drm_vma_offset_lock_lookup(&bdev->vma_manager);

	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
	if (likely(node)) {
		bo = container_of(node, struct ttm_buffer_object, vma_node);
		if (!kref_get_unless_zero(&bo->kref))
			bo = NULL;
	}

	drm_vma_offset_unlock_lookup(&bdev->vma_manager);

	if (!bo)
		pr_err("Could not find buffer object to map\n");

	return bo;
}
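
/**
 * ttm_bo_mmap - mmap out of the ttm device address space
 *
 * @filp: filp as input from the mmap method.
 * @vma: vma as input from the mmap method.
 * @bdev: the ttm_bo_device with the address space manager.
 *
 * Intended to be called from a device's mmap method when the device
 * address space is backed by the bo manager. The looked-up bo reference
 * is transferred to vma->vm_private_data.
 */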
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
		struct ttm_bo_device *bdev)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	int ret;

	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
	if (unlikely(!bo))
		return -EINVAL;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}
	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = bo;

	/*
	 * We'd like to use VM_PFNMAP on shared mappings, where
	 * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
	 * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
	 * bad for performance. Until that has been sorted out, use
	 * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
	 */
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
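
/*
 * ttm_fbdev_mmap() - set up @vma to map a single bo directly (as used for
 * fbdev-style mappings); only mappings starting at offset zero are
 * accepted.
 */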
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);