/*
 * ispqueue.c
 *
 * TI OMAP3 ISP - Video buffers queue handling
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/omap-iommu.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "isp.h"
#include "ispqueue.h"
#include "ispvideo.h"

/* -----------------------------------------------------------------------------
 * Video buffers management
 */
/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is cache invalidation across the
 * (userspace) buffer address range, and it _must_ be done at QBUF time
 * (and *only* at QBUF).
 *
 * We try to use the optimal cache invalidation function:
 *
 * - dmac_map_area:
 *    - used when the number of pages is low
 *    - becomes quite slow as the number of pages increases
 *    - for a 648x492 viewfinder (150 pages) it takes 1.3 ms
 *    - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms
 *
 * - flush_cache_all:
 *    - used when the number of pages is high
 *    - takes in the range of 500-900 us
 *    - has a higher penalty, as the whole dcache + icache is invalidated
 */
/*
 * FIXME: dmac_inv_range crashes randomly on the user space buffer
 *	  address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX	0

static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
	if (buf->skip_cache)
		return;

	if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
	    buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
		flush_cache_all();
	else {
		dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
			      DMA_FROM_DEVICE);
		outer_inv_range(buf->vbuf.m.userptr,
				buf->vbuf.m.userptr + buf->vbuf.length);
	}
}
/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This prevents the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed, as the buffers have been locked into
 * memory by a call to get_user_pages(), but the userspace mapping could still
 * disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked under
 * some conditions.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
	struct vm_area_struct *vma;
	unsigned long start;
	unsigned long end;
	int ret = 0;

	if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
		return 0;

	/* We can be called from workqueue context if the current task dies to
	 * unlock the VMAs. In that case there's no current memory management
	 * context so unlocking can't be performed, but the VMAs have been or
	 * are getting destroyed anyway so it doesn't really matter.
	 */
	if (!current || !current->mm)
		return lock ? -EINVAL : 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_write(&current->mm->mmap_sem);
	spin_lock(&current->mm->page_table_lock);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL) {
			ret = -EFAULT;
			goto out;
		}

		if (lock)
			vma->vm_flags |= VM_LOCKED;
		else
			vma->vm_flags &= ~VM_LOCKED;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	if (lock)
		buf->vm_flags |= VM_LOCKED;
	else
		buf->vm_flags &= ~VM_LOCKED;

out:
	spin_unlock(&current->mm->page_table_lock);
	up_write(&current->mm->mmap_sem);
	return ret;
}
/*
 * isp_video_buffer_prepare_kernel - Build scatter list for a vmalloc'ed buffer
 *
 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 * page.
 */
static int isp_video_buffer_prepare_kernel(struct isp_video_buffer *buf)
{
	struct scatterlist *sg;
	unsigned int npages;
	unsigned int i;
	void *addr;
	int ret;

	addr = buf->vaddr;
	npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;

	ret = sg_alloc_table(&buf->sgt, npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(addr);

		if (page == NULL || PageHighMem(page)) {
			sg_free_table(&buf->sgt);
			return -EINVAL;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	return 0;
}
/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call to isp_video_buffer_prepare_user and free the
 * pages table.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_video *video = vfh->video;
	enum dma_data_direction direction;
	unsigned int i;

	if (buf->dma) {
		omap_iommu_vunmap(video->isp->domain, video->isp->dev,
				  buf->dma);
		buf->dma = 0;
	}

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		dma_unmap_sg(buf->queue->dev, buf->sgt.sgl, buf->sgt.orig_nents,
			     direction);
	}

	sg_free_table(&buf->sgt);

	if (buf->pages != NULL) {
		isp_video_buffer_lock_vma(buf, 0);

		for (i = 0; i < buf->npages; ++i)
			page_cache_release(buf->pages[i]);

		vfree(buf->pages);
		buf->pages = NULL;
	}

	buf->npages = 0;
	buf->skip_cache = false;
}
/*
 * isp_video_buffer_prepare_user - Prepare a userspace buffer.
 *
 * This function creates a scatter list with a 1:1 mapping for a userspace VMA.
 * The number of pages is first computed based on the buffer size, and pages are
 * then retrieved by a call to get_user_pages.
 *
 * Pages are pinned to memory by get_user_pages, making them available for DMA
 * transfers. However, due to memory management optimizations, it seems that
 * get_user_pages doesn't guarantee that the pinned pages will not be written
 * to swap and removed from the userspace mapping(s). When this happens, a page
 * fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
	struct scatterlist *sg;
	unsigned int offset;
	unsigned long data;
	unsigned int first;
	unsigned int last;
	unsigned int i;
	int ret;

	data = buf->vbuf.m.userptr;
	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
	offset = data & ~PAGE_MASK;

	buf->npages = last - first + 1;
	buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
	if (buf->pages == NULL)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, data & PAGE_MASK,
			     buf->npages,
			     buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
			     buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret != buf->npages) {
		buf->npages = ret < 0 ? 0 : ret;
		return -EFAULT;
	}

	ret = isp_video_buffer_lock_vma(buf, 1);
	if (ret < 0)
		return ret;

	ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i) {
		if (PageHighMem(buf->pages[i])) {
			sg_free_table(&buf->sgt);
			return -EINVAL;
		}

		sg_set_page(sg, buf->pages[i], PAGE_SIZE - offset, offset);
		sg = sg_next(sg);
		offset = 0;
	}

	return 0;
}
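
/*
 * Worked example (illustrative only, the numbers are assumptions): with
 * PAGE_SIZE = 4096, a userptr of 0x20001200 and a length of 8192 bytes,
 * the code above computes
 *
 *	first  = (0x20001200 & PAGE_MASK) >> PAGE_SHIFT = 0x20001
 *	last   = ((0x20001200 + 8192 - 1) & PAGE_MASK) >> PAGE_SHIFT = 0x20003
 *	npages = last - first + 1 = 3
 *	offset = 0x20001200 & ~PAGE_MASK = 0x200
 *
 * so three pages are pinned. The first scatterlist entry covers
 * PAGE_SIZE - 0x200 bytes starting at offset 0x200 within its page, and the
 * remaining entries each cover a full page (the last page is mapped in full
 * even though the buffer ends in the middle of it).
 */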
/*
 * isp_video_buffer_prepare_pfnmap - Prepare a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA. Start by validating the user pointer to
 * make sure it fulfils that condition, and then build a scatter list of
 * physically contiguous pages starting at the buffer memory physical address.
 *
 * Return 0 on success, -EFAULT if the buffer isn't valid or -ENOMEM if memory
 * can't be allocated.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	struct scatterlist *sg;
	unsigned long prev_pfn;
	unsigned long this_pfn;
	unsigned long start;
	unsigned int offset;
	unsigned long end;
	unsigned long pfn;
	unsigned int i;
	int ret = 0;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
	offset = start & ~PAGE_MASK;

	buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	buf->pages = NULL;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (vma == NULL || vma->vm_end < end) {
		ret = -EFAULT;
		goto unlock;
	}

	for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
		ret = follow_pfn(vma, start, &this_pfn);
		if (ret < 0)
			goto unlock;

		if (prev_pfn == 0)
			pfn = this_pfn;
		else if (this_pfn != prev_pfn + 1) {
			ret = -EFAULT;
			goto unlock;
		}

		prev_pfn = this_pfn;
	}

unlock:
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;

	ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
	if (ret < 0)
		return ret;

	for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i, ++pfn) {
		sg_set_page(sg, pfn_to_page(pfn), PAGE_SIZE - offset, offset);
		/* PFNMAP buffers will not get DMA-mapped, set the DMA address
		 * manually.
		 */
		sg_dma_address(sg) = (pfn << PAGE_SHIFT) + offset;
		sg = sg_next(sg);
		offset = 0;
	}

	return 0;
}
/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care for at the moment
 * is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
	struct vm_area_struct *vma;
	pgprot_t uninitialized_var(vm_page_prot);
	unsigned long start;
	unsigned long end;
	int ret = -EFAULT;

	start = buf->vbuf.m.userptr;
	end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

	down_read(&current->mm->mmap_sem);

	do {
		vma = find_vma(current->mm, start);
		if (vma == NULL)
			goto done;

		if (start == buf->vbuf.m.userptr) {
			buf->vm_flags = vma->vm_flags;
			vm_page_prot = vma->vm_page_prot;
		}

		if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
			goto done;

		if (vm_page_prot != vma->vm_page_prot)
			goto done;

		start = vma->vm_end + 1;
	} while (vma->vm_end < end);

	/* Skip cache management to enhance performance for non-cached or
	 * write-combining buffers.
	 */
	if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
	    vm_page_prot == pgprot_writecombine(vm_page_prot))
		buf->skip_cache = true;

	ret = 0;

done:
	up_read(&current->mm->mmap_sem);
	return ret;
}
/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * segfaults).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_video *video = vfh->video;
	enum dma_data_direction direction;
	unsigned long addr;
	int ret;

	switch (buf->vbuf.memory) {
	case V4L2_MEMORY_MMAP:
		ret = isp_video_buffer_prepare_kernel(buf);
		break;

	case V4L2_MEMORY_USERPTR:
		ret = isp_video_buffer_prepare_vm_flags(buf);
		if (ret < 0)
			return ret;

		if (buf->vm_flags & VM_PFNMAP)
			ret = isp_video_buffer_prepare_pfnmap(buf);
		else
			ret = isp_video_buffer_prepare_user(buf);
		break;

	default:
		return -EINVAL;
	}

	if (ret < 0)
		goto done;

	if (!(buf->vm_flags & VM_PFNMAP)) {
		direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			  ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
		ret = dma_map_sg(buf->queue->dev, buf->sgt.sgl,
				 buf->sgt.orig_nents, direction);
		if (ret != buf->sgt.orig_nents) {
			ret = -EFAULT;
			goto done;
		}
	}

	addr = omap_iommu_vmap(video->isp->domain, video->isp->dev, 0,
			       &buf->sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8);
	if (IS_ERR_VALUE(addr)) {
		ret = -EIO;
		goto done;
	}

	buf->dma = addr;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32-byte boundary.\n");
		ret = -EINVAL;
		goto done;
	}

	if (buf->queue->ops->buffer_prepare)
		ret = buf->queue->ops->buffer_prepare(buf);

done:
	if (ret < 0) {
		isp_video_buffer_cleanup(buf);
		return ret;
	}

	return ret;
}
/*
 * isp_video_buffer_query - Query the status of a given buffer
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
				   struct v4l2_buffer *vbuf)
{
	memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

	if (buf->vma_use_count)
		vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (buf->state) {
	case ISP_BUF_STATE_ERROR:
		vbuf->flags |= V4L2_BUF_FLAG_ERROR;
		/* Fallthrough */
	case ISP_BUF_STATE_DONE:
		vbuf->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case ISP_BUF_STATE_QUEUED:
	case ISP_BUF_STATE_ACTIVE:
		vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case ISP_BUF_STATE_IDLE:
	default:
		break;
	}
}
/*
 * isp_video_buffer_wait - Wait for a buffer to be ready
 *
 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 *
 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
 * queue using the same condition.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
	if (nonblocking) {
		return (buf->state != ISP_BUF_STATE_QUEUED &&
			buf->state != ISP_BUF_STATE_ACTIVE)
			? 0 : -EAGAIN;
	}

	return wait_event_interruptible(buf->wait,
		buf->state != ISP_BUF_STATE_QUEUED &&
		buf->state != ISP_BUF_STATE_ACTIVE);
}
/* -----------------------------------------------------------------------------
 * Queue management
 */

/*
 * isp_video_queue_free - Free video buffers memory
 *
 * Buffers can only be freed if the queue isn't streaming and if no buffer is
 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_free(struct isp_video_queue *queue)
{
	unsigned int i;

	if (queue->streaming)
		return -EBUSY;

	for (i = 0; i < queue->count; ++i) {
		if (queue->buffers[i]->vma_use_count != 0)
			return -EBUSY;
	}

	for (i = 0; i < queue->count; ++i) {
		struct isp_video_buffer *buf = queue->buffers[i];

		isp_video_buffer_cleanup(buf);

		vfree(buf->vaddr);
		buf->vaddr = NULL;

		kfree(buf);
		queue->buffers[i] = NULL;
	}

	INIT_LIST_HEAD(&queue->queue);
	queue->count = 0;
	return 0;
}
/*
 * isp_video_queue_alloc - Allocate video buffers memory
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
				 unsigned int nbuffers,
				 unsigned int size, enum v4l2_memory memory)
{
	struct isp_video_buffer *buf;
	unsigned int i;
	void *mem;
	int ret;

	/* Start by freeing the buffers. */
	ret = isp_video_queue_free(queue);
	if (ret < 0)
		return ret;

	/* Bail out if no buffers should be allocated. */
	if (nbuffers == 0)
		return 0;

	/* Initialize the allocated buffers. */
	for (i = 0; i < nbuffers; ++i) {
		buf = kzalloc(queue->bufsize, GFP_KERNEL);
		if (buf == NULL)
			break;

		if (memory == V4L2_MEMORY_MMAP) {
			/* Allocate video buffers memory for mmap mode. Align
			 * the size to the page size.
			 */
			mem = vmalloc_32_user(PAGE_ALIGN(size));
			if (mem == NULL) {
				kfree(buf);
				break;
			}

			buf->vbuf.m.offset = i * PAGE_ALIGN(size);
			buf->vaddr = mem;
		}

		buf->vbuf.index = i;
		buf->vbuf.length = size;
		buf->vbuf.type = queue->type;
		buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		buf->vbuf.field = V4L2_FIELD_NONE;
		buf->vbuf.memory = memory;

		buf->queue = queue;
		init_waitqueue_head(&buf->wait);

		queue->buffers[i] = buf;
	}

	if (i == 0)
		return -ENOMEM;

	queue->count = i;
	return nbuffers;
}
/**
 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 * @queue: Video buffers queue
 *
 * Free all allocated resources and clean up the video buffers queue. The queue
 * must not be busy (no ongoing video stream) and buffers must have been
 * unmapped.
 *
 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 * unmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
	return isp_video_queue_free(queue);
}

/**
 * omap3isp_video_queue_init - Initialize the video buffers queue
 * @queue: Video buffers queue
 * @type: V4L2 buffer type (capture or output)
 * @ops: Driver-specific queue operations
 * @dev: Device used for DMA operations
 * @bufsize: Size of the driver-specific buffer structure
 *
 * Initialize the video buffers queue with the supplied parameters.
 *
 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 *
 * Buffer objects will be allocated using the given buffer size to allow room
 * for driver-specific fields. Driver-specific buffer structures must start
 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 * structure must pass the size of the isp_video_buffer structure in the bufsize
 * parameter.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_init(struct isp_video_queue *queue,
			      enum v4l2_buf_type type,
			      const struct isp_video_queue_operations *ops,
			      struct device *dev, unsigned int bufsize)
{
	INIT_LIST_HEAD(&queue->queue);
	mutex_init(&queue->lock);
	spin_lock_init(&queue->irqlock);

	queue->type = type;
	queue->ops = ops;
	queue->dev = dev;
	queue->bufsize = bufsize;

	return 0;
}
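
/*
 * Usage sketch (illustrative only): the 'mydrv_*' names below are
 * assumptions, not part of this driver, and the callback signatures are
 * inferred from how the queue operations are invoked in this file. A driver
 * embeds struct isp_video_buffer at the start of its own buffer structure
 * and passes that structure's size as 'bufsize':
 *
 *	struct mydrv_buffer {
 *		struct isp_video_buffer buffer;	// must be the first member
 *		u32 private_state;		// driver-specific data
 *	};
 *
 *	static void mydrv_queue_prepare(struct isp_video_queue *queue,
 *					unsigned int *nbuffers,
 *					unsigned int *size);
 *	static int mydrv_buffer_prepare(struct isp_video_buffer *buf);
 *	static void mydrv_buffer_queue(struct isp_video_buffer *buf);
 *
 *	static const struct isp_video_queue_operations mydrv_queue_ops = {
 *		.queue_prepare	= mydrv_queue_prepare,
 *		.buffer_prepare	= mydrv_buffer_prepare,
 *		.buffer_queue	= mydrv_buffer_queue,
 *	};
 *
 *	omap3isp_video_queue_init(&mydrv->queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				  &mydrv_queue_ops, dev,
 *				  sizeof(struct mydrv_buffer));
 */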
/* -----------------------------------------------------------------------------
 * V4L2 operations
 */

/**
 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 *
 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 *
 * If the number of buffers is 0, all buffers are freed and the function returns
 * without performing any allocation.
 *
 * If the number of buffers is not 0, currently allocated buffers (if any) are
 * freed and the requested number of buffers are allocated. Depending on
 * driver-specific requirements and on memory availability, a number of buffers
 * smaller or larger than requested can be allocated. This isn't considered an
 * error.
 *
 * Return 0 on success or one of the following error codes:
 *
 * -EINVAL if the buffer type or index are invalid
 * -EBUSY if the queue is busy (streaming or buffers mapped)
 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
				 struct v4l2_requestbuffers *rb)
{
	unsigned int nbuffers = rb->count;
	unsigned int size;
	int ret;

	if (rb->type != queue->type)
		return -EINVAL;

	queue->ops->queue_prepare(queue, &nbuffers, &size);
	if (size == 0)
		return -EINVAL;

	nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

	mutex_lock(&queue->lock);

	ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
	if (ret < 0)
		goto done;

	rb->count = ret;
	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
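
/*
 * Usage sketch (illustrative only; 'mydrv_fh' and the way the queue is
 * reached from the file handle are assumptions, not part of this file):
 *
 *	static int mydrv_reqbufs(struct file *file, void *fh,
 *				 struct v4l2_requestbuffers *rb)
 *	{
 *		struct mydrv_fh *handle = fh;
 *
 *		return omap3isp_video_queue_reqbufs(&handle->queue, rb);
 *	}
 */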
/**
 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 *
 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 * returns the status of a given video buffer.
 *
 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
				  struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	int ret = 0;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count) {
		ret = -EINVAL;
		goto done;
	}

	buf = queue->buffers[vbuf->index];
	isp_video_buffer_query(buf, vbuf);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
/**
 * omap3isp_video_queue_qbuf - Queue a buffer
 *
 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity tested. If
 * sane, the buffer is then processed and added to the main queue and, if the
 * queue is streaming, to the IRQ queue.
 *
 * Before being enqueued, USERPTR buffers are checked for address changes. If
 * the buffer has a different userspace address, the old memory area is unlocked
 * and the new memory area is locked.
 */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
			      struct v4l2_buffer *vbuf)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	int ret = -EINVAL;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (vbuf->index >= queue->count)
		goto done;

	buf = queue->buffers[vbuf->index];

	if (vbuf->memory != buf->vbuf.memory)
		goto done;

	if (buf->state != ISP_BUF_STATE_IDLE)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->length < buf->vbuf.length)
		goto done;

	if (vbuf->memory == V4L2_MEMORY_USERPTR &&
	    vbuf->m.userptr != buf->vbuf.m.userptr) {
		isp_video_buffer_cleanup(buf);
		buf->vbuf.m.userptr = vbuf->m.userptr;
		buf->prepared = 0;
	}

	if (!buf->prepared) {
		ret = isp_video_buffer_prepare(buf);
		if (ret < 0)
			goto done;
		buf->prepared = 1;
	}

	isp_video_buffer_cache_sync(buf);

	buf->state = ISP_BUF_STATE_QUEUED;
	list_add_tail(&buf->stream, &queue->queue);

	if (queue->streaming) {
		spin_lock_irqsave(&queue->irqlock, flags);
		queue->ops->buffer_queue(buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	ret = 0;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
/**
 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 *
 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 *
 * Wait until a buffer is ready to be dequeued, remove it from the queue and
 * copy its information to the v4l2_buffer structure.
 *
 * If the nonblocking argument is not zero and no buffer is ready, return
 * -EAGAIN immediately instead of waiting.
 *
 * If no buffer has been enqueued, or if the requested buffer type doesn't match
 * the queue type, return -EINVAL.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
			       struct v4l2_buffer *vbuf, int nonblocking)
{
	struct isp_video_buffer *buf;
	int ret;

	if (vbuf->type != queue->type)
		return -EINVAL;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		ret = -EINVAL;
		goto done;
	}

	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
	ret = isp_video_buffer_wait(buf, nonblocking);
	if (ret < 0)
		goto done;

	list_del(&buf->stream);

	isp_video_buffer_query(buf, vbuf);
	buf->state = ISP_BUF_STATE_IDLE;
	vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
	mutex_unlock(&queue->lock);
	return ret;
}
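
/*
 * Usage sketch (illustrative only; 'mydrv_fh' is an assumption, not part of
 * this file). The nonblocking argument of the dqbuf helper comes from the
 * file's O_NONBLOCK flag:
 *
 *	static int mydrv_qbuf(struct file *file, void *fh,
 *			      struct v4l2_buffer *b)
 *	{
 *		struct mydrv_fh *handle = fh;
 *
 *		return omap3isp_video_queue_qbuf(&handle->queue, b);
 *	}
 *
 *	static int mydrv_dqbuf(struct file *file, void *fh,
 *			       struct v4l2_buffer *b)
 *	{
 *		struct mydrv_fh *handle = fh;
 *
 *		return omap3isp_video_queue_dqbuf(&handle->queue, b,
 *						  file->f_flags & O_NONBLOCK);
 *	}
 */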
/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;

	mutex_lock(&queue->lock);

	if (queue->streaming)
		goto done;

	queue->streaming = 1;

	spin_lock_irqsave(&queue->irqlock, flags);
	list_for_each_entry(buf, &queue->queue, stream)
		queue->ops->buffer_queue(buf);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->lock);
	return 0;
}

/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned long flags;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	queue->streaming = 0;

	spin_lock_irqsave(&queue->irqlock, flags);
	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_ACTIVE)
			wake_up(&buf->wait);

		buf->state = ISP_BUF_STATE_IDLE;
	}
	spin_unlock_irqrestore(&queue->irqlock, flags);

	INIT_LIST_HEAD(&queue->queue);

done:
	mutex_unlock(&queue->lock);
}
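
/*
 * Usage sketch (illustrative only; 'mydrv_fh' and mydrv_hw_stop() are
 * assumptions, not part of this file). Per the comment above, the hardware
 * must be stopped and interrupt handlers synchronized before streamoff is
 * called:
 *
 *	static int mydrv_streamoff(struct file *file, void *fh,
 *				   enum v4l2_buf_type type)
 *	{
 *		struct mydrv_fh *handle = fh;
 *
 *		mydrv_hw_stop(handle);		// stop DMA, sync with IRQs
 *		omap3isp_video_queue_streamoff(&handle->queue);
 *		return 0;
 *	}
 */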
/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 * delayed works before calling this function to make sure no buffer will be
 * touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
	struct isp_video_buffer *buf;
	unsigned int i;

	mutex_lock(&queue->lock);

	if (!queue->streaming)
		goto done;

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];

		if (buf->state == ISP_BUF_STATE_DONE)
			buf->state = ISP_BUF_STATE_ERROR;
	}

done:
	mutex_unlock(&queue->lock);
}
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
	struct isp_video_buffer *buf = vma->vm_private_data;

	buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
	.open = isp_video_queue_vm_open,
	.close = isp_video_queue_vm_close,
};

/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
			      struct vm_area_struct *vma)
{
	struct isp_video_buffer *uninitialized_var(buf);
	unsigned long size;
	unsigned int i;
	int ret = 0;

	mutex_lock(&queue->lock);

	for (i = 0; i < queue->count; ++i) {
		buf = queue->buffers[i];
		if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}

	if (i == queue->count) {
		ret = -EINVAL;
		goto done;
	}

	size = vma->vm_end - vma->vm_start;

	if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
	    size != PAGE_ALIGN(buf->vbuf.length)) {
		ret = -EINVAL;
		goto done;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret < 0)
		goto done;

	vma->vm_ops = &isp_video_queue_vm_ops;
	vma->vm_private_data = buf;
	isp_video_queue_vm_open(vma);

done:
	mutex_unlock(&queue->lock);
	return ret;
}
/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
				       struct file *file, poll_table *wait)
{
	struct isp_video_buffer *buf;
	unsigned int mask = 0;

	mutex_lock(&queue->lock);

	if (list_empty(&queue->queue)) {
		mask |= POLLERR;
		goto done;
	}

	buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

	poll_wait(file, &buf->wait, wait);

	if (buf->state == ISP_BUF_STATE_DONE ||
	    buf->state == ISP_BUF_STATE_ERROR) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			mask |= POLLIN | POLLRDNORM;
		else
			mask |= POLLOUT | POLLWRNORM;
	}

done:
	mutex_unlock(&queue->lock);
	return mask;
}
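
/*
 * Usage sketch (illustrative only; 'mydrv_fh' and the file-to-queue mapping
 * are assumptions, not part of this file):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_fh *handle = file->private_data;
 *
 *		return omap3isp_video_queue_mmap(&handle->queue, vma);
 *	}
 *
 *	static unsigned int mydrv_poll(struct file *file, poll_table *wait)
 *	{
 *		struct mydrv_fh *handle = file->private_data;
 *
 *		return omap3isp_video_queue_poll(&handle->queue, file, wait);
 *	}
 */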