ttm_bo_vm.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#define TTM_BO_VM_NUM_PREFAULT 16
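
/*
 * ttm_bo_vm_fault_idle - quiesce a buffer object that is part of a
 * pipelined move before the fault handler touches its pages.
 *
 * Returns 0 on success, or a VM_FAULT_ code the caller should
 * propagate. Called with the bo reserved and mmap_sem held for read;
 * in the VM_FAULT_RETRY path mmap_sem is dropped before waiting.
 */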
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        int ret = 0;

        if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        ret = ttm_bo_wait(bo, false, true);
        if (likely(ret == 0))
                goto out_unlock;

        /*
         * If possible, avoid waiting for GPU with mmap_sem
         * held.
         */
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                up_read(&vma->vm_mm->mmap_sem);
                (void) ttm_bo_wait(bo, true, false);
                goto out_unlock;
        }

        /*
         * Ordinary wait.
         */
        ret = ttm_bo_wait(bo, true, false);
        if (unlikely(ret != 0))
                ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;

out_unlock:
        return ret;
}
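
/*
 * Main fault handler for TTM buffer objects. Resolves the faulting
 * address to a page of the backing ttm_tt, or to an io-memory pfn,
 * and inserts up to TTM_BO_VM_NUM_PREFAULT PTEs per invocation.
 */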
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int ret;
        int i;
        unsigned long address = (unsigned long)vmf->virtual_address;
        int retval = VM_FAULT_NOPAGE;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
        struct vm_area_struct cvma;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        ret = ttm_bo_reserve(bo, true, true, NULL);
        if (unlikely(ret != 0)) {
                if (ret != -EBUSY)
                        return VM_FAULT_NOPAGE;

                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                up_read(&vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                        }

                        return VM_FAULT_RETRY;
                }

                /*
                 * If we'd want to change locking order to
                 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
                 * instead of retrying the fault...
                 */
                return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                retval = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                ret = bdev->driver->fault_reserve_notify(bo);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        retval = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        retval = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
        if (unlikely(ret != 0)) {
                retval = ret;
                goto out_unlock;
        }

        ret = ttm_mem_io_lock(man, true);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        ret = ttm_mem_io_reserve_vm(bo);
        if (unlikely(ret != 0)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }
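
        /*
         * Translate the faulting address to a page offset within the
         * buffer object: vma->vm_pgoff contains the object's offset in
         * the global drm_vma address space, so subtracting the node
         * start yields a bo-relative page index.
         */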
        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->vma_node);

        if (unlikely(page_offset >= bo->num_pages)) {
                retval = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Make a local vma copy to modify the page_prot member
         * and vm_flags if necessary. The vma parameter is protected
         * by mmap_sem in write mode.
         */
        cvma = *vma;
        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

        if (bo->mem.bus.is_iomem) {
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);

                /* Allocate all pages at once, the most common usage */
                if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
                        retval = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * the first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem)
                        pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
                else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                retval = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        page->mapping = vma->vm_file->f_mapping;
                        page->index = drm_vma_node_start(&bo->vma_node) +
                                page_offset;
                        pfn = page_to_pfn(page);
                }

                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vm_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vm_insert_pfn(&cvma, address, pfn);

                /*
                 * Somebody beat us to this PTE, we prefaulted into an
                 * already-populated PTE, or a prefault beyond the first
                 * page failed.
                 */
                if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
                        break;
                else if (unlikely(ret != 0)) {
                        retval =
                            (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                        goto out_io_unlock;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        ttm_bo_unreserve(bo);
        return retval;
}
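
/*
 * The vm_operations open/close callbacks keep the bo reference count
 * in sync with the number of vmas pointing at it: open takes an extra
 * reference for the new vma, close drops it again.
 */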
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        (void)ttm_bo_reference(bo);
}

static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_unref(&bo);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close
};
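
/*
 * ttm_bo_vm_lookup - resolve an mmap offset range to a buffer object.
 *
 * Looks up @offset / @pages in the device's vma offset manager and,
 * on a match, returns the bo with an extra kref taken, or NULL if no
 * live object spans the range.
 */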
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
                                                  unsigned long offset,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct ttm_buffer_object *bo = NULL;

        drm_vma_offset_lock_lookup(&bdev->vma_manager);

        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object, vma_node);
                if (!kref_get_unless_zero(&bo->kref))
                        bo = NULL;
        }

        drm_vma_offset_unlock_lookup(&bdev->vma_manager);

        if (!bo)
                pr_err("Could not find buffer object to map\n");

        return bo;
}
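
/*
 * ttm_bo_mmap - entry point for driver mmap handlers.
 *
 * Verifies access through the driver's verify_access hook, then sets
 * up the vma to fault through ttm_bo_vm_ops. The bo reference taken
 * by the lookup is handed over to vma->vm_private_data and dropped
 * again in ttm_bo_vm_close().
 */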
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
        if (unlikely(!bo))
                return -EINVAL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */
        vma->vm_private_data = bo;

        /*
         * We'd like to use VM_PFNMAP on shared mappings, where
         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
         * bad for performance. Until that has been sorted out, use
         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
         */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
out_unref:
        ttm_bo_unref(&bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);
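
/*
 * Typical driver wiring (a sketch, not part of this file; the
 * "my_device" type and the file_to_my_device() helper are
 * hypothetical):
 *
 *	static int my_drm_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct my_device *mdev = file_to_my_device(filp);
 *
 *		return ttm_bo_mmap(filp, vma, &mdev->bdev);
 *	}
 */

/*
 * ttm_fbdev_mmap - set up a vma for an already known bo, bypassing
 * the offset lookup and verify_access. As the name suggests, this is
 * meant for fbdev emulation, where the whole mapping (vm_pgoff == 0)
 * maps a single buffer.
 */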
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = ttm_bo_reference(bo);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);