
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>
static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

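/*
 * Per-buffer state: the backing pages, the scatter-gather table built
 * over them, a reference count, and the handler used to track mmap()
 * mappings of the buffer.
 */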
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;
};

static void vb2_dma_sg_put(void *buf_priv);

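/*
 * Allocate the buffer as a set of high-order page blocks: try the
 * largest order that does not overshoot the remaining size, fall back
 * to smaller orders on failure, and split each block into individual
 * pages so they can be tracked in buf->pages.
 */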
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

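/*
 * MMAP allocation: build the page array, construct the sg_table from
 * it, and take the initial reference that vb2_dma_sg_put() will drop.
 */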
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, gfp_flags);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

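/*
 * Drop one reference; on the last put, tear down the kernel mapping,
 * the sg_table, the pages themselves and the bookkeeping structure.
 */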
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

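/* True for VM_IO/VM_PFNMAP vmas, whose pages are not pinned or put. */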
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

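/*
 * USERPTR path: resolve the user range to pages, either by following
 * PFNs for I/O memory vmas or by pinning the pages with
 * get_user_pages(), then wrap them in an sg_table.
 */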
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

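/*
 * Map the pages into kernel space lazily, on the first vaddr request,
 * using vm_map_ram(); the mapping is torn down when the buffer is
 * released.
 */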
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

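/*
 * mmap() support: the buffer is not physically contiguous, so each
 * page is inserted into the user vma individually with
 * vm_insert_page().
 */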
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

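/*
 * The "cookie" handed to drivers is the sg_table itself, ready to be
 * passed to dma_map_sg().
 */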
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
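
/*
 * Usage sketch: a driver opts into this allocator by pointing its
 * vb2_queue at these ops before calling vb2_queue_init(). The queue
 * pointer "q" below is illustrative, not part of this file:
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 *
 * Per-plane sg_tables are then available to the driver through
 * vb2_dma_sg_plane_desc() (declared in videobuf2-dma-sg.h).
 */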