umem.c

/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/slab.h>

#include "uverbs.h"

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
				umem->nmap,
				DMA_BIDIRECTIONAL);

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

	sg_free_table(&umem->sg_head);
}

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 * @context: userspace context to pin memory for
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @dmasync: flush in-flight DMA when the memory region is written
 */
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
			    size_t size, int access, int dmasync)
{
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int i;
	DEFINE_DMA_ATTRS(attrs);
	struct scatterlist *sg, *sg_list_start;
	int need_release = 0;

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context   = context;
	umem->length    = size;
	umem->address   = addr;
	umem->page_size = PAGE_SIZE;
	umem->pid       = get_task_pid(current, PIDTYPE_PID);

	/*
	 * We ask for writable memory if any access flags other than
	 * "remote read" are set.  "Local write" and "remote write"
	 * obviously require write access.  "Remote atomic" can do
	 * things like fetch and add, which will modify memory, and
	 * "MW bind" can change permissions by binding a window.
	 */
	umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);

	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * if we can't alloc the vma_list, it's not so bad;
	 * just assume the memory is not hugetlb memory
	 */
	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
	if (!vma_list)
		umem->hugetlb = 0;

	npages = ib_umem_num_pages(umem);

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (npages == 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
	if (ret)
		goto out;

	need_release = 1;
	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof (struct page *)),
				     1, !umem->writable, page_list, vma_list);

		if (ret < 0)
			goto out;

		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

		for_each_sg(sg_list_start, sg, ret, i) {
			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
				umem->hugetlb = 0;

			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

		/* preparing for next loop */
		sg_list_start = sg;
	}

	umem->nmap = ib_dma_map_sg_attrs(context->device,
					 umem->sg_head.sgl,
					 umem->npages,
					 DMA_BIDIRECTIONAL,
					 &attrs);

	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;

out:
	if (ret < 0) {
		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		put_pid(umem->pid);
		kfree(umem);
	} else
		current->mm->pinned_vm = locked;

	up_write(&current->mm->mmap_sem);
	if (vma_list)
		free_page((unsigned long) vma_list);
	free_page((unsigned long) page_list);

	return ret < 0 ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
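
/*
 * Illustrative usage sketch, not part of the original file: a verbs
 * driver typically pins a user buffer with ib_umem_get() while
 * registering a memory region and keeps the returned umem around
 * until deregistration.  The mr structure below is hypothetical.
 *
 *	struct ib_umem *umem;
 *
 *	umem = ib_umem_get(pd->uobject->context, start, length,
 *			   access_flags, 0);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	mr->umem = umem;
 */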
static void ib_umem_account(struct work_struct *work)
{
	struct ib_umem *umem = container_of(work, struct ib_umem, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->pinned_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;
	struct mm_struct *mm;
	struct task_struct *task;
	unsigned long diff;

	__ib_umem_release(umem->context->device, umem, 1);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	diff = ib_umem_num_pages(umem);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the vm_locked accounting to the system workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&umem->work, ib_umem_account);
			umem->mm   = mm;
			umem->diff = diff;

			queue_work(ib_wq, &umem->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->pinned_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);
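
/*
 * Illustrative sketch (hypothetical driver code): ib_umem_release() is
 * normally the last step of deregistering a memory region, after the
 * hardware can no longer DMA into the pages.  my_mr and to_my_mr()
 * are made-up names:
 *
 *	static int my_dereg_mr(struct ib_mr *ibmr)
 *	{
 *		struct my_mr *mr = to_my_mr(ibmr);
 *
 *		... tear down hardware mappings first ...
 *		if (mr->umem)
 *			ib_umem_release(mr->umem);
 *		kfree(mr);
 *		return 0;
 *	}
 */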
int ib_umem_page_count(struct ib_umem *umem)
{
	int shift;
	int i;
	int n;
	struct scatterlist *sg;

	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> shift;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);
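
/*
 * Illustrative sketch: ib_umem_page_count() walks the DMA-mapped
 * entries, so it reflects the region as the device sees it; drivers
 * commonly use it to size a hardware page list before walking the SG
 * table.  The kmalloc_array() caller below is an assumed example,
 * not taken from this file:
 *
 *	int n = ib_umem_page_count(umem);
 *	u64 *pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 */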
/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
		       offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
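
/*
 * Illustrative sketch: copying a small header that userspace wrote
 * into the pinned region out to a kernel buffer.  The 64-byte size is
 * an arbitrary example:
 *
 *	char hdr[64];
 *	int err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */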