/* NOTE(review): pagination/line-number residue from a scraped copy of this file was removed here. */
  1. /*
  2. * videobuf2-core.c - video buffer 2 core framework
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * The vb2_thread implementation was based on code from videobuf-dvb.c:
  10. * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation.
  15. */
  16. #include <linux/err.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/mm.h>
  20. #include <linux/poll.h>
  21. #include <linux/slab.h>
  22. #include <linux/sched.h>
  23. #include <linux/freezer.h>
  24. #include <linux/kthread.h>
  25. #include <media/videobuf2-core.h>
  26. #include <media/v4l2-mc.h>
  27. #include <trace/events/vb2.h>
/* Module-wide verbosity level; writable at runtime via /sys/module (mode 0644). */
static int debug;
module_param(debug, int, 0644);

/*
 * dprintk() - emit @fmt via pr_info(), prefixed with "vb2-core" and the
 * calling function's name, but only when the 'debug' module parameter is
 * at least @level.
 */
#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-core: %s: " fmt, __func__, ## arg);      \
	} while (0)
#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

/* Log a mem_ops call at debug level 2; "(nop)" marks an unimplemented op. */
#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

/* Invoke an int-returning mem_op; only successful (0) calls are counted. */
#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

/* Invoke a pointer-returning mem_op; only non-error results are counted. */
#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

/* Invoke a void mem_op; the per-buffer counter is bumped unconditionally. */
#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

/* Log a queue-level vb2_ops call at debug level 2. */
#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

/* Invoke an int-returning queue op; only successful calls are counted. */
#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void queue op; the per-queue counter is bumped unconditionally. */
#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

/* Log a per-buffer vb2_ops call at debug level 2. */
#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

/* Invoke an int-returning per-buffer op; only successful calls are counted. */
#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void per-buffer op; the counter is bumped unconditionally. */
#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/* Without CONFIG_VIDEO_ADV_DEBUG the wrappers just forward the call. */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
/* Call an optional buf_ops callback; evaluates to 0 when the op is absent. */
#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

/* Call an optional buf_ops callback that returns nothing. */
#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

/* Forward declarations for helpers defined later in this file. */
static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);
  161. /**
  162. * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
  163. */
  164. static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
  165. {
  166. struct vb2_queue *q = vb->vb2_queue;
  167. enum dma_data_direction dma_dir =
  168. q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  169. void *mem_priv;
  170. int plane;
  171. /*
  172. * Allocate memory for all planes in this buffer
  173. * NOTE: mmapped areas should be page aligned
  174. */
  175. for (plane = 0; plane < vb->num_planes; ++plane) {
  176. unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
  177. mem_priv = call_ptr_memop(vb, alloc,
  178. q->alloc_devs[plane] ? : q->dev,
  179. q->dma_attrs, size, dma_dir, q->gfp_flags);
  180. if (IS_ERR_OR_NULL(mem_priv))
  181. goto free;
  182. /* Associate allocator private data with this plane */
  183. vb->planes[plane].mem_priv = mem_priv;
  184. }
  185. return 0;
  186. free:
  187. /* Free already allocated memory if one of the allocations failed */
  188. for (; plane > 0; --plane) {
  189. call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
  190. vb->planes[plane - 1].mem_priv = NULL;
  191. }
  192. return -ENOMEM;
  193. }
  194. /**
  195. * __vb2_buf_mem_free() - free memory of the given buffer
  196. */
  197. static void __vb2_buf_mem_free(struct vb2_buffer *vb)
  198. {
  199. unsigned int plane;
  200. for (plane = 0; plane < vb->num_planes; ++plane) {
  201. call_void_memop(vb, put, vb->planes[plane].mem_priv);
  202. vb->planes[plane].mem_priv = NULL;
  203. dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
  204. }
  205. }
  206. /**
  207. * __vb2_buf_userptr_put() - release userspace memory associated with
  208. * a USERPTR buffer
  209. */
  210. static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
  211. {
  212. unsigned int plane;
  213. for (plane = 0; plane < vb->num_planes; ++plane) {
  214. if (vb->planes[plane].mem_priv)
  215. call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
  216. vb->planes[plane].mem_priv = NULL;
  217. }
  218. }
  219. /**
  220. * __vb2_plane_dmabuf_put() - release memory associated with
  221. * a DMABUF shared plane
  222. */
  223. static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
  224. {
  225. if (!p->mem_priv)
  226. return;
  227. if (p->dbuf_mapped)
  228. call_void_memop(vb, unmap_dmabuf, p->mem_priv);
  229. call_void_memop(vb, detach_dmabuf, p->mem_priv);
  230. dma_buf_put(p->dbuf);
  231. p->mem_priv = NULL;
  232. p->dbuf = NULL;
  233. p->dbuf_mapped = 0;
  234. }
  235. /**
  236. * __vb2_buf_dmabuf_put() - release memory associated with
  237. * a DMABUF shared buffer
  238. */
  239. static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
  240. {
  241. unsigned int plane;
  242. for (plane = 0; plane < vb->num_planes; ++plane)
  243. __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
  244. }
  245. /**
  246. * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  247. * the buffer.
  248. */
  249. static void __setup_offsets(struct vb2_buffer *vb)
  250. {
  251. struct vb2_queue *q = vb->vb2_queue;
  252. unsigned int plane;
  253. unsigned long off = 0;
  254. if (vb->index) {
  255. struct vb2_buffer *prev = q->bufs[vb->index - 1];
  256. struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
  257. off = PAGE_ALIGN(p->m.offset + p->length);
  258. }
  259. for (plane = 0; plane < vb->num_planes; ++plane) {
  260. vb->planes[plane].m.offset = off;
  261. dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
  262. vb->index, plane, off);
  263. off += vb->planes[plane].length;
  264. off = PAGE_ALIGN(off);
  265. }
  266. }
  267. /**
  268. * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
  269. * video buffer memory for all buffers/planes on the queue and initializes the
  270. * queue
  271. *
  272. * Returns the number of buffers successfully allocated.
  273. */
  274. static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
  275. unsigned int num_buffers, unsigned int num_planes,
  276. const unsigned plane_sizes[VB2_MAX_PLANES])
  277. {
  278. unsigned int buffer, plane;
  279. struct vb2_buffer *vb;
  280. int ret;
  281. for (buffer = 0; buffer < num_buffers; ++buffer) {
  282. /* Allocate videobuf buffer structures */
  283. vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
  284. if (!vb) {
  285. dprintk(1, "memory alloc for buffer struct failed\n");
  286. break;
  287. }
  288. vb->state = VB2_BUF_STATE_DEQUEUED;
  289. vb->vb2_queue = q;
  290. vb->num_planes = num_planes;
  291. vb->index = q->num_buffers + buffer;
  292. vb->type = q->type;
  293. vb->memory = memory;
  294. for (plane = 0; plane < num_planes; ++plane) {
  295. vb->planes[plane].length = plane_sizes[plane];
  296. vb->planes[plane].min_length = plane_sizes[plane];
  297. }
  298. q->bufs[vb->index] = vb;
  299. /* Allocate video buffer memory for the MMAP type */
  300. if (memory == VB2_MEMORY_MMAP) {
  301. ret = __vb2_buf_mem_alloc(vb);
  302. if (ret) {
  303. dprintk(1, "failed allocating memory for "
  304. "buffer %d\n", buffer);
  305. q->bufs[vb->index] = NULL;
  306. kfree(vb);
  307. break;
  308. }
  309. __setup_offsets(vb);
  310. /*
  311. * Call the driver-provided buffer initialization
  312. * callback, if given. An error in initialization
  313. * results in queue setup failure.
  314. */
  315. ret = call_vb_qop(vb, buf_init, vb);
  316. if (ret) {
  317. dprintk(1, "buffer %d %p initialization"
  318. " failed\n", buffer, vb);
  319. __vb2_buf_mem_free(vb);
  320. q->bufs[vb->index] = NULL;
  321. kfree(vb);
  322. break;
  323. }
  324. }
  325. }
  326. dprintk(1, "allocated %d buffers, %d plane(s) each\n",
  327. buffer, num_planes);
  328. return buffer;
  329. }
  330. /**
  331. * __vb2_free_mem() - release all video buffer memory for a given queue
  332. */
  333. static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
  334. {
  335. unsigned int buffer;
  336. struct vb2_buffer *vb;
  337. for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
  338. ++buffer) {
  339. vb = q->bufs[buffer];
  340. if (!vb)
  341. continue;
  342. /* Free MMAP buffers or release USERPTR buffers */
  343. if (q->memory == VB2_MEMORY_MMAP)
  344. __vb2_buf_mem_free(vb);
  345. else if (q->memory == VB2_MEMORY_DMABUF)
  346. __vb2_buf_dmabuf_put(vb);
  347. else
  348. __vb2_buf_userptr_put(vb);
  349. }
  350. }
/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		/* Only buffers whose memory was actually set up get cleanup. */
		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		/* Reset the per-queue counters once they have been reported. */
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	/*
	 * NOTE(review): this loop walks ALL buffers (not just the ones being
	 * freed) and dereferences q->bufs[buffer] without the NULL check the
	 * loops above perform — presumably every slot below num_buffers is
	 * non-NULL here; verify against callers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2: buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2: get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		/* No buffers left: return the queue to an uninitialized state. */
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
  457. /**
  458. * vb2_buffer_in_use() - return true if the buffer is in use and
  459. * the queue cannot be freed (by the means of REQBUFS(0)) call
  460. */
  461. bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
  462. {
  463. unsigned int plane;
  464. for (plane = 0; plane < vb->num_planes; ++plane) {
  465. void *mem_priv = vb->planes[plane].mem_priv;
  466. /*
  467. * If num_users() has not been provided, call_memop
  468. * will return 0, apparently nobody cares about this
  469. * case anyway. If num_users() returns more than 1,
  470. * we are not the only user of the plane's memory.
  471. */
  472. if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
  473. return true;
  474. }
  475. return false;
  476. }
  477. EXPORT_SYMBOL(vb2_buffer_in_use);
  478. /**
  479. * __buffers_in_use() - return true if any buffers on the queue are in use and
  480. * the queue cannot be freed (by the means of REQBUFS(0)) call
  481. */
  482. static bool __buffers_in_use(struct vb2_queue *q)
  483. {
  484. unsigned int buffer;
  485. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  486. if (vb2_buffer_in_use(q, q->bufs[buffer]))
  487. return true;
  488. }
  489. return false;
  490. }
  491. /**
  492. * vb2_core_querybuf() - query video buffer information
  493. * @q: videobuf queue
  494. * @index: id number of the buffer
  495. * @pb: buffer struct passed from userspace
  496. *
  497. * Should be called from vidioc_querybuf ioctl handler in driver.
  498. * The passed buffer should have been verified.
  499. * This function fills the relevant information for the userspace.
  500. */
  501. void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
  502. {
  503. call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
  504. }
  505. EXPORT_SYMBOL_GPL(vb2_core_querybuf);
  506. /**
  507. * __verify_userptr_ops() - verify that all memory operations required for
  508. * USERPTR queue type have been provided
  509. */
  510. static int __verify_userptr_ops(struct vb2_queue *q)
  511. {
  512. if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
  513. !q->mem_ops->put_userptr)
  514. return -EINVAL;
  515. return 0;
  516. }
  517. /**
  518. * __verify_mmap_ops() - verify that all memory operations required for
  519. * MMAP queue type have been provided
  520. */
  521. static int __verify_mmap_ops(struct vb2_queue *q)
  522. {
  523. if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
  524. !q->mem_ops->put || !q->mem_ops->mmap)
  525. return -EINVAL;
  526. return 0;
  527. }
  528. /**
  529. * __verify_dmabuf_ops() - verify that all memory operations required for
  530. * DMABUF queue type have been provided
  531. */
  532. static int __verify_dmabuf_ops(struct vb2_queue *q)
  533. {
  534. if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
  535. !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
  536. !q->mem_ops->unmap_dmabuf)
  537. return -EINVAL;
  538. return 0;
  539. }
  540. /**
  541. * vb2_verify_memory_type() - Check whether the memory type and buffer type
  542. * passed to a buffer operation are compatible with the queue.
  543. */
  544. int vb2_verify_memory_type(struct vb2_queue *q,
  545. enum vb2_memory memory, unsigned int type)
  546. {
  547. if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
  548. memory != VB2_MEMORY_DMABUF) {
  549. dprintk(1, "unsupported memory type\n");
  550. return -EINVAL;
  551. }
  552. if (type != q->type) {
  553. dprintk(1, "requested type is incorrect\n");
  554. return -EINVAL;
  555. }
  556. /*
  557. * Make sure all the required memory ops for given memory type
  558. * are available.
  559. */
  560. if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
  561. dprintk(1, "MMAP for current setup unsupported\n");
  562. return -EINVAL;
  563. }
  564. if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
  565. dprintk(1, "USERPTR for current setup unsupported\n");
  566. return -EINVAL;
  567. }
  568. if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
  569. dprintk(1, "DMABUF for current setup unsupported\n");
  570. return -EINVAL;
  571. }
  572. /*
  573. * Place the busy tests at the end: -EBUSY can be ignored when
  574. * create_bufs is called with count == 0, but count == 0 should still
  575. * do the memory and type validation.
  576. */
  577. if (vb2_fileio_is_active(q)) {
  578. dprintk(1, "file io in progress\n");
  579. return -EBUSY;
  580. }
  581. return 0;
  582. }
  583. EXPORT_SYMBOL(vb2_verify_memory_type);
  584. /**
  585. * vb2_core_reqbufs() - Initiate streaming
  586. * @q: videobuf2 queue
  587. * @memory: memory type
  588. * @count: requested buffer count
  589. *
  590. * Should be called from vidioc_reqbufs ioctl handler of a driver.
  591. * This function:
  592. * 1) verifies streaming parameters passed from the userspace,
  593. * 2) sets up the queue,
  594. * 3) negotiates number of buffers and planes per buffer with the driver
  595. * to be used during streaming,
  596. * 4) allocates internal buffer structures (struct vb2_buffer), according to
  597. * the agreed parameters,
  598. * 5) for MMAP memory type, allocates actual video memory, using the
  599. * memory handling/allocation routines provided during queue initialization
  600. *
  601. * If req->count is 0, all the memory will be freed instead.
  602. * If the queue has been allocated previously (by a previous vb2_reqbufs) call
  603. * and the queue is not busy, memory will be reallocated.
  604. *
  605. * The return values from this function are intended to be directly returned
  606. * from vidioc_reqbufs handler in driver.
  607. */
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	/* Buffer counts cannot be changed while streaming is in progress. */
	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 || q->memory != memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
			mutex_unlock(&q->mmap_lock);
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 * Clamp to VB2_MAX_FRAME, but never below min_buffers_needed.
	 */
	num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/* The memory model is latched here for the lifetime of the buffers. */
	q->memory = memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 * If fewer buffers were allocated than requested, give the driver a
	 * second queue_setup() call so it can accept or reject the new count.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/* num_buffers is published under mmap_lock (mmap() reads it). */
	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	/* Capture queues now wait for buffers before poll() reports ready. */
	q->waiting_for_buffers = !q->is_output;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
  716. /**
  717. * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
  718. * @q: videobuf2 queue
  719. * @memory: memory type
 * @count: requested buffer count
 * @requested_planes: number of planes requested
 * @requested_sizes: array of requested plane sizes
  722. *
  723. * Should be called from vidioc_create_bufs ioctl handler of a driver.
  724. * This function:
  725. * 1) verifies parameter sanity
  726. * 2) calls the .queue_setup() queue operation
  727. * 3) performs any necessary memory allocations
  728. *
  729. * The return values from this function are intended to be directly returned
  730. * from vidioc_create_bufs handler in driver.
  731. */
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count, unsigned requested_planes,
		const unsigned requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	/* The queue can hold at most VB2_MAX_FRAME buffers in total. */
	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	/*
	 * First buffers on an empty queue: latch the memory model and reset
	 * the per-plane allocator devices, as vb2_core_reqbufs() would.
	 */
	if (!q->num_buffers) {
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		q->memory = memory;
		q->waiting_for_buffers = !q->is_output;
	}

	/* Clamp the request so the grand total never exceeds VB2_MAX_FRAME. */
	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		/*
		 * NOTE(review): copies sizeof(plane_sizes) bytes regardless of
		 * requested_planes — assumes callers pass an array of at least
		 * VB2_MAX_PLANES entries; confirm against callers.
		 */
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
				num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/* num_buffers is published under mmap_lock (mmap() reads it). */
	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		/*
		 * NOTE(review): returns -ENOMEM even when ret came from the
		 * driver's second queue_setup(); differs from reqbufs which
		 * returns ret — long-standing behavior, kept as-is.
		 */
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
  806. /**
  807. * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
  808. * @vb: vb2_buffer to which the plane in question belongs to
  809. * @plane_no: plane number for which the address is to be returned
  810. *
  811. * This function returns a kernel virtual address of a given plane if
  812. * such a mapping exist, NULL otherwise.
  813. */
  814. void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
  815. {
  816. if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
  817. return NULL;
  818. return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
  819. }
  820. EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
  821. /**
  822. * vb2_plane_cookie() - Return allocator specific cookie for the given plane
  823. * @vb: vb2_buffer to which the plane in question belongs to
  824. * @plane_no: plane number for which the cookie is to be returned
  825. *
  826. * This function returns an allocator specific cookie for a given plane if
  827. * available, NULL otherwise. The allocator should provide some simple static
  828. * inline function, which would convert this cookie to the allocator specific
  829. * type that can be used directly by the driver to access the buffer. This can
  830. * be for example physical address, pointer to scatter list or IOMMU mapping.
  831. */
  832. void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
  833. {
  834. if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
  835. return NULL;
  836. return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
  837. }
  838. EXPORT_SYMBOL_GPL(vb2_plane_cookie);
  839. /**
  840. * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
  841. * @vb: vb2_buffer returned from the driver
  842. * @state: either VB2_BUF_STATE_DONE if the operation finished successfully,
  843. * VB2_BUF_STATE_ERROR if the operation finished with an error or
  844. * VB2_BUF_STATE_QUEUED if the driver wants to requeue buffers.
  845. * If start_streaming fails then it should return buffers with state
  846. * VB2_BUF_STATE_QUEUED to put them back into the queue.
  847. *
  848. * This function should be called by the driver after a hardware operation on
  849. * a buffer is finished and the buffer may be returned to userspace. The driver
  850. * cannot use this buffer anymore until it is queued back to it by videobuf
  851. * by the means of buf_queue callback. Only buffers previously queued to the
  852. * driver by buf_queue can be passed to this function.
  853. *
  854. * While streaming a buffer can only be returned in state DONE or ERROR.
  855. * The start_streaming op can also return them in case the DMA engine cannot
  856. * be started for some reason. In that case the buffers should be returned with
  857. * state QUEUED.
  858. */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	/* Only buffers currently owned by the driver (ACTIVE) may complete. */
	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	/* Coerce any unexpected target state to ERROR (and warn once). */
	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
			state != VB2_BUF_STATE_ERROR &&
			state != VB2_BUF_STATE_QUEUED &&
			state != VB2_BUF_STATE_REQUEUEING))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "done processing on buffer %d, state: %d\n",
			vb->index, state);

	/* sync buffers: run the allocator's finish op on every plane */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);

	/* State change and done-list insertion must be atomic vs dqbuf. */
	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED ||
	    state == VB2_BUF_STATE_REQUEUEING) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		/* Buffer stays queued in vb2; nobody to wake. */
		return;
	case VB2_BUF_STATE_REQUEUEING:
		/* Hand the buffer straight back to the driver. */
		if (q->start_streaming_called)
			__enqueue_in_driver(vb);
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
  909. /**
  910. * vb2_discard_done() - discard all buffers marked as DONE
  911. * @q: videobuf2 queue
  912. *
  913. * This function is intended to be used with suspend/resume operations. It
  914. * discards all 'done' buffers as they would be too old to be requested after
  915. * resume.
  916. *
  917. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  918. * delayed works before calling this function to make sure no buffer will be
  919. * touched by the driver and/or hardware.
  920. */
  921. void vb2_discard_done(struct vb2_queue *q)
  922. {
  923. struct vb2_buffer *vb;
  924. unsigned long flags;
  925. spin_lock_irqsave(&q->done_lock, flags);
  926. list_for_each_entry(vb, &q->done_list, done_entry)
  927. vb->state = VB2_BUF_STATE_ERROR;
  928. spin_unlock_irqrestore(&q->done_lock, flags);
  929. }
  930. EXPORT_SYMBOL_GPL(vb2_discard_done);
  931. /**
  932. * __qbuf_mmap() - handle qbuf of an MMAP buffer
  933. */
  934. static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
  935. {
  936. int ret = 0;
  937. if (pb)
  938. ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
  939. vb, pb, vb->planes);
  940. return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
  941. }
  942. /**
  943. * __qbuf_userptr() - handle qbuf of a USERPTR buffer
  944. */
static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	enum dma_data_direction dma_dir =
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	/* No plane-0 memory yet means this buffer is freshly (re)acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb)
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						vb->planes[plane].min_length,
						plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				/* Undo driver-specific init before re-init */
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		/* Reset plane bookkeeping before attempting reacquisition */
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr,
				q->alloc_devs[plane] ? : q->dev,
				planes[plane].m.userptr,
				planes[plane].length, dma_dir);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		/* buf_init succeeded, so balance it with buf_cleanup */
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
				vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}
  1047. /**
  1048. * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
  1049. */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	enum dma_data_direction dma_dir =
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	/* No plane-0 memory yet means this buffer is freshly (re)acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb)
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Takes a reference on the dma-buf; every exit path below
		 * that does not keep it must drop it with dma_buf_put(). */
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "invalid dmabuf length for plane %d\n",
				plane);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
			vb->planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			/* Undo driver-specific init before re-init */
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf,
				q->alloc_devs[plane] ? : q->dev,
				dbuf, planes[plane].length, dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		/* The plane now owns the dbuf reference taken above. */
		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		/* buf_init succeeded, so balance it with buf_cleanup */
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
  1161. /**
  1162. * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  1163. */
  1164. static void __enqueue_in_driver(struct vb2_buffer *vb)
  1165. {
  1166. struct vb2_queue *q = vb->vb2_queue;
  1167. unsigned int plane;
  1168. vb->state = VB2_BUF_STATE_ACTIVE;
  1169. atomic_inc(&q->owned_by_drv_count);
  1170. trace_vb2_buf_queue(q, vb);
  1171. /* sync buffers */
  1172. for (plane = 0; plane < vb->num_planes; ++plane)
  1173. call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
  1174. call_void_vb_qop(vb, buf_queue, vb);
  1175. }
/* Prepare a buffer according to the queue's memory model; on success the
 * buffer ends in PREPARED state, on failure it reverts to DEQUEUED. */
static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_queue *q = vb->vb2_queue;
	int ret;

	/* A queue in fatal error state accepts no further buffers. */
	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	/* Dispatch on the memory model latched at reqbufs/create_bufs time. */
	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __qbuf_mmap(vb, pb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __qbuf_userptr(vb, pb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __qbuf_dmabuf(vb, pb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret)
		dprintk(1, "buffer preparation failed: %d\n", ret);
	/* Failure returns the buffer to DEQUEUED so it can be retried. */
	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;

	return ret;
}
  1204. /**
  1205. * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
  1206. * to the kernel
  1207. * @q: videobuf2 queue
  1208. * @index: id number of the buffer
  1209. * @pb: buffer structure passed from userspace to vidioc_prepare_buf
  1210. * handler in driver
  1211. *
  1212. * Should be called from vidioc_prepare_buf ioctl handler of a driver.
  1213. * The passed buffer should have been verified.
  1214. * This function calls buf_prepare callback in the driver (if provided),
  1215. * in which driver-specific buffer initialization can be performed,
  1216. *
  1217. * The return values from this function are intended to be directly returned
  1218. * from vidioc_prepare_buf handler in driver.
  1219. */
  1220. int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
  1221. {
  1222. struct vb2_buffer *vb;
  1223. int ret;
  1224. vb = q->bufs[index];
  1225. if (vb->state != VB2_BUF_STATE_DEQUEUED) {
  1226. dprintk(1, "invalid buffer state %d\n",
  1227. vb->state);
  1228. return -EINVAL;
  1229. }
  1230. ret = __buf_prepare(vb, pb);
  1231. if (ret)
  1232. return ret;
  1233. /* Fill buffer information for the userspace */
  1234. call_void_bufop(q, fill_user_buffer, vb, pb);
  1235. dprintk(1, "prepare of buffer %d succeeded\n", vb->index);
  1236. return ret;
  1237. }
  1238. EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
  1239. /**
  1240. * vb2_start_streaming() - Attempt to start streaming.
  1241. * @q: videobuf2 queue
  1242. *
  1243. * Attempt to start streaming. When this function is called there must be
  1244. * at least q->min_buffers_needed buffers queued up (i.e. the minimum
  1245. * number of buffers required for the DMA engine to function). If the
  1246. * @start_streaming op fails it is supposed to return all the driver-owned
  1247. * buffers back to vb2 in state QUEUED. Check if that happened and if
  1248. * not warn and reclaim them forcefully.
  1249. */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	/* start_streaming failed: roll the flag back and reclaim buffers. */
	q->start_streaming_called = 0;

	dprintk(1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information how buffers
	 * should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
  1296. /**
  1297. * vb2_core_qbuf() - Queue a buffer from userspace
  1298. * @q: videobuf2 queue
  1299. * @index: id number of the buffer
  1300. * @pb: buffer structure passed from userspace to vidioc_qbuf handler
  1301. * in driver
  1302. *
  1303. * Should be called from vidioc_qbuf ioctl handler of a driver.
  1304. * The passed buffer should have been verified.
  1305. * This function:
  1306. * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
  1307. * which driver-specific buffer initialization can be performed,
  1308. * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
  1309. * callback for processing.
  1310. *
  1311. * The return values from this function are intended to be directly returned
  1312. * from vidioc_qbuf handler in driver.
  1313. */
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];

	/* Only DEQUEUED (prepare on the fly) or PREPARED buffers may queue. */
	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, pb);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "invalid buffer state %d\n", vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
  1370. /**
  1371. * __vb2_wait_for_done_vb() - wait for a buffer to become available
  1372. * for dequeuing
  1373. *
  1374. * Will sleep if required for nonblocking == false.
  1375. */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		/* Re-check all exit conditions on every iteration, since
		 * they may have changed while the driver's lock was dropped. */
		if (!q->streaming) {
			dprintk(1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			/* -ERESTARTSYS from a signal while sleeping */
			dprintk(1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
  1436. /**
  1437. * __vb2_get_done_vb() - get a buffer ready for dequeuing
  1438. *
  1439. * Will sleep if required for nonblocking == false.
  1440. */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     void *pb, int nonblocking)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);

	/*
	 * Only remove the buffer from done_list if all planes can be
	 * handled. Some cases such as V4L2 file I/O and DVB have pb
	 * == NULL; skip the check then as there's nothing to verify.
	 */
	if (pb)
		ret = call_bufop(q, verify_planes_array, *vb, pb);
	/* On verification failure the buffer stays on the done list. */
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
  1470. /**
  1471. * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
  1472. * @q: videobuf2 queue
  1473. *
  1474. * This function will wait until all buffers that have been given to the driver
  1475. * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
  1476. * wait_prepare, wait_finish pair. It is intended to be called with all locks
  1477. * taken, for example from stop_streaming() callback.
  1478. */
  1479. int vb2_wait_for_all_buffers(struct vb2_queue *q)
  1480. {
  1481. if (!q->streaming) {
  1482. dprintk(1, "streaming off, will not wait for buffers\n");
  1483. return -EINVAL;
  1484. }
  1485. if (q->start_streaming_called)
  1486. wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
  1487. return 0;
  1488. }
  1489. EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
  1490. /**
  1491. * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
  1492. */
  1493. static void __vb2_dqbuf(struct vb2_buffer *vb)
  1494. {
  1495. struct vb2_queue *q = vb->vb2_queue;
  1496. unsigned int i;
  1497. /* nothing to do if the buffer is already dequeued */
  1498. if (vb->state == VB2_BUF_STATE_DEQUEUED)
  1499. return;
  1500. vb->state = VB2_BUF_STATE_DEQUEUED;
  1501. /* unmap DMABUF buffer */
  1502. if (q->memory == VB2_MEMORY_DMABUF)
  1503. for (i = 0; i < vb->num_planes; ++i) {
  1504. if (!vb->planes[i].dbuf_mapped)
  1505. continue;
  1506. call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
  1507. vb->planes[i].dbuf_mapped = 0;
  1508. }
  1509. }
/**
 * vb2_core_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @pindex:	if non-NULL, receives the index of the dequeued buffer
 * @pb:		buffer structure passed from userspace to vidioc_dqbuf handler
 *		in driver; may be NULL for in-kernel callers (file I/O, DVB)
 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
 *		 buffers ready for dequeuing are present. Normally the driver
 *		 would be passing (file->f_flags & O_NONBLOCK) here
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function:
 * 1) calls buf_finish callback in the driver (if provided), in which
 *    driver can perform any additional operations that may be required before
 *    returning the buffer to userspace, such as cache sync,
 * 2) the buffer struct members are filled with relevant information for
 *    the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_dqbuf handler in driver.
 */
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	/* May sleep (when !nonblocking) until a buffer reaches done_list. */
	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "returning done buffer with errors\n");
		break;
	default:
		/* Only DONE or ERROR buffers are valid on the done list. */
		dprintk(1, "invalid buffer state\n");
		return -EINVAL;
	}

	/*
	 * buf_finish must run before fill_user_buffer below; this ordering
	 * is relied upon elsewhere (see __vb2_queue_cancel).
	 */
	call_void_vb_qop(vb, buf_finish, vb);

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 * @q:		videobuf2 queue
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information how buffers should be returned
	 * to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		/* Forcibly fail any buffer the driver failed to give back. */
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	/* A queue error (vb2_queue_error) is cleared by cancelling. */
	q->error = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	/* Release anyone sleeping in __vb2_wait_for_done_vb() or poll(). */
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}
  1628. int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
  1629. {
  1630. int ret;
  1631. if (type != q->type) {
  1632. dprintk(1, "invalid stream type\n");
  1633. return -EINVAL;
  1634. }
  1635. if (q->streaming) {
  1636. dprintk(3, "already streaming\n");
  1637. return 0;
  1638. }
  1639. if (!q->num_buffers) {
  1640. dprintk(1, "no buffers have been allocated\n");
  1641. return -EINVAL;
  1642. }
  1643. if (q->num_buffers < q->min_buffers_needed) {
  1644. dprintk(1, "need at least %u allocated buffers\n",
  1645. q->min_buffers_needed);
  1646. return -EINVAL;
  1647. }
  1648. /*
  1649. * Tell driver to start streaming provided sufficient buffers
  1650. * are available.
  1651. */
  1652. if (q->queued_count >= q->min_buffers_needed) {
  1653. ret = v4l_vb2q_enable_media_source(q);
  1654. if (ret)
  1655. return ret;
  1656. ret = vb2_start_streaming(q);
  1657. if (ret) {
  1658. __vb2_queue_cancel(q);
  1659. return ret;
  1660. }
  1661. }
  1662. q->streaming = 1;
  1663. dprintk(3, "successful\n");
  1664. return 0;
  1665. }
  1666. EXPORT_SYMBOL_GPL(vb2_core_streamon);
/**
 * vb2_queue_error() - signal a fatal error on the queue
 * @q:		videobuf2 queue
 *
 * Flag that a fatal unrecoverable error has occurred and wake up all processes
 * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
 * buffers will return -EIO.
 *
 * The error flag will be cleared when cancelling the queue, either from
 * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
 * function before starting the stream, otherwise the error flag will remain set
 * until the queue is released when closing the device node.
 */
void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	/* Wake sleepers in __vb2_wait_for_done_vb() so they see q->error. */
	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
/**
 * vb2_core_streamoff() - stop streaming on a queue
 * @q:		videobuf2 queue
 * @type:	buffer type, must match q->type
 *
 * Cancels streaming and returns all buffers to the dequeued state.
 */
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);

	/* Capture queues go back to "waiting for first QBUF" (poll quirk). */
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
  1708. /**
  1709. * __find_plane_by_offset() - find plane associated with the given offset off
  1710. */
  1711. static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
  1712. unsigned int *_buffer, unsigned int *_plane)
  1713. {
  1714. struct vb2_buffer *vb;
  1715. unsigned int buffer, plane;
  1716. /*
  1717. * Go over all buffers and their planes, comparing the given offset
  1718. * with an offset assigned to each plane. If a match is found,
  1719. * return its buffer and plane numbers.
  1720. */
  1721. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  1722. vb = q->bufs[buffer];
  1723. for (plane = 0; plane < vb->num_planes; ++plane) {
  1724. if (vb->planes[plane].m.offset == off) {
  1725. *_buffer = buffer;
  1726. *_plane = plane;
  1727. return 0;
  1728. }
  1729. }
  1730. }
  1731. return -EINVAL;
  1732. }
/**
 * vb2_core_expbuf() - Export a buffer as a file descriptor
 * @q:		videobuf2 queue
 * @fd:		file descriptor associated with DMABUF (set by driver)
 * @type:	buffer type
 * @index:	id number of the buffer
 * @plane:	index of the plane to be exported, 0 for single plane queues
 * @flags:	flags for newly created file, currently only O_CLOEXEC is
 *		supported, refer to manual of open syscall for more details
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_expbuf handler in driver.
 */
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	/* Only MMAP buffers (allocated by vb2) can be exported. */
	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[index];

	if (plane >= vb->num_planes) {
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	/* Ask the allocator for a dma_buf wrapping this plane's memory. */
	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
				flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(1, "failed to export buffer %d, plane %d\n",
			index, plane);
		return -EINVAL;
	}

	/* Install the dma_buf in an fd; access mode was consumed above. */
	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			index, plane, ret);
		/* Drop the reference taken by get_dmabuf on failure. */
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
		index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
  1803. /**
  1804. * vb2_mmap() - map video buffers into application address space
  1805. * @q: videobuf2 queue
  1806. * @vma: vma passed to the mmap file operation handler in the driver
  1807. *
  1808. * Should be called from mmap file operation handler of a driver.
  1809. * This function maps one plane of one of the available video buffers to
  1810. * userspace. To map whole video memory allocated on reqbufs, this function
  1811. * has to be called once per each plane per each buffer previously allocated.
  1812. *
  1813. * When the userspace application calls mmap, it passes to it an offset returned
  1814. * to it earlier by the means of vidioc_querybuf handler. That offset acts as
  1815. * a "cookie", which is then used to identify the plane to be mapped.
  1816. * This function finds a plane with a matching offset and a mapping is performed
  1817. * by the means of a provided memory operation.
  1818. *
  1819. * The return values from this function are intended to be directly returned
  1820. * from the mmap handler in driver.
  1821. */
  1822. int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
  1823. {
  1824. unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
  1825. struct vb2_buffer *vb;
  1826. unsigned int buffer = 0, plane = 0;
  1827. int ret;
  1828. unsigned long length;
  1829. if (q->memory != VB2_MEMORY_MMAP) {
  1830. dprintk(1, "queue is not currently set up for mmap\n");
  1831. return -EINVAL;
  1832. }
  1833. /*
  1834. * Check memory area access mode.
  1835. */
  1836. if (!(vma->vm_flags & VM_SHARED)) {
  1837. dprintk(1, "invalid vma flags, VM_SHARED needed\n");
  1838. return -EINVAL;
  1839. }
  1840. if (q->is_output) {
  1841. if (!(vma->vm_flags & VM_WRITE)) {
  1842. dprintk(1, "invalid vma flags, VM_WRITE needed\n");
  1843. return -EINVAL;
  1844. }
  1845. } else {
  1846. if (!(vma->vm_flags & VM_READ)) {
  1847. dprintk(1, "invalid vma flags, VM_READ needed\n");
  1848. return -EINVAL;
  1849. }
  1850. }
  1851. if (vb2_fileio_is_active(q)) {
  1852. dprintk(1, "mmap: file io in progress\n");
  1853. return -EBUSY;
  1854. }
  1855. /*
  1856. * Find the plane corresponding to the offset passed by userspace.
  1857. */
  1858. ret = __find_plane_by_offset(q, off, &buffer, &plane);
  1859. if (ret)
  1860. return ret;
  1861. vb = q->bufs[buffer];
  1862. /*
  1863. * MMAP requires page_aligned buffers.
  1864. * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
  1865. * so, we need to do the same here.
  1866. */
  1867. length = PAGE_ALIGN(vb->planes[plane].length);
  1868. if (length < (vma->vm_end - vma->vm_start)) {
  1869. dprintk(1,
  1870. "MMAP invalid, as it would overflow buffer length\n");
  1871. return -EINVAL;
  1872. }
  1873. mutex_lock(&q->mmap_lock);
  1874. ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
  1875. mutex_unlock(&q->mmap_lock);
  1876. if (ret)
  1877. return ret;
  1878. dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
  1879. return 0;
  1880. }
  1881. EXPORT_SYMBOL_GPL(vb2_mmap);
  1882. #ifndef CONFIG_MMU
/*
 * vb2_get_unmapped_area() - no-MMU variant of the mmap cookie lookup.
 *
 * On no-MMU systems mmap needs the kernel virtual address of the buffer
 * plane identified by the offset cookie; return it, or an error.
 *
 * NOTE(review): errors are returned as a negative errno cast to
 * unsigned long, which appears to follow the get_unmapped_area
 * convention — confirm against callers.
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	void *vaddr;
	int ret;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	vaddr = vb2_plane_vaddr(vb, plane);
	return vaddr ? (unsigned long)vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
  1909. #endif
  1910. /**
  1911. * vb2_core_queue_init() - initialize a videobuf2 queue
  1912. * @q: videobuf2 queue; this structure should be allocated in driver
  1913. *
  1914. * The vb2_queue structure should be allocated by the driver. The driver is
  1915. * responsible of clearing it's content and setting initial values for some
  1916. * required entries before calling this function.
  1917. * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
  1918. * to the struct vb2_queue description in include/media/videobuf2-core.h
  1919. * for more information.
  1920. */
  1921. int vb2_core_queue_init(struct vb2_queue *q)
  1922. {
  1923. /*
  1924. * Sanity check
  1925. */
  1926. if (WARN_ON(!q) ||
  1927. WARN_ON(!q->ops) ||
  1928. WARN_ON(!q->mem_ops) ||
  1929. WARN_ON(!q->type) ||
  1930. WARN_ON(!q->io_modes) ||
  1931. WARN_ON(!q->ops->queue_setup) ||
  1932. WARN_ON(!q->ops->buf_queue))
  1933. return -EINVAL;
  1934. INIT_LIST_HEAD(&q->queued_list);
  1935. INIT_LIST_HEAD(&q->done_list);
  1936. spin_lock_init(&q->done_lock);
  1937. mutex_init(&q->mmap_lock);
  1938. init_waitqueue_head(&q->done_wq);
  1939. if (q->buf_struct_size == 0)
  1940. q->buf_struct_size = sizeof(struct vb2_buffer);
  1941. return 0;
  1942. }
  1943. EXPORT_SYMBOL_GPL(vb2_core_queue_init);
  1944. static int __vb2_init_fileio(struct vb2_queue *q, int read);
  1945. static int __vb2_cleanup_fileio(struct vb2_queue *q);
/**
 * vb2_core_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 */
void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);

	/* Buffer freeing must be serialized against mmap via mmap_lock. */
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
/**
 * vb2_core_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
 */
unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
		poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/* Bail out early if the caller isn't polling for our direction. */
	if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return POLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return POLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return POLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
		return POLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return POLLOUT | POLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return POLLIN | POLLRDNORM;

		poll_wait(file, &q->done_wq, wait);
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				POLLOUT | POLLWRNORM :
				POLLIN | POLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of the (single) plane */
	unsigned int size;	/* bytes available in this buffer */
	unsigned int pos;	/* current read/write position within vaddr */
	unsigned int queued:1;	/* buffer is currently queued to the driver */
};
/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @count:	number of buffers requested/allocated for the emulator
 * @type:	buffer type, copied from q->type
 * @memory:	memory model used for the emulator (always VB2_MEMORY_MMAP)
 * @bufs:	per-buffer read/write state
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 * @q_count:	running count of buffers queued by the emulator
 * @dq_count:	running count of buffers dequeued by the emulator
 * @read_once:	stop emulation after the first dequeued buffer is consumed
 * @write_immediately: queue a buffer to the driver after every write()
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates the emulator context, requests MMAP buffers from the driver,
 * maps them into the kernel and starts streaming. In read mode all
 * buffers are pre-queued. Returns 0 on success or a negative errno.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			ret = vb2_core_qbuf(q, i, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	/* Requesting zero buffers frees everything allocated above. */
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}
  2207. /**
  2208. * __vb2_cleanup_fileio() - free resourced used by file io emulator
  2209. * @q: videobuf2 queue
  2210. */
  2211. static int __vb2_cleanup_fileio(struct vb2_queue *q)
  2212. {
  2213. struct vb2_fileio_data *fileio = q->fileio;
  2214. if (fileio) {
  2215. vb2_core_streamoff(q, q->type);
  2216. q->fileio = NULL;
  2217. fileio->count = 0;
  2218. vb2_core_reqbufs(q, fileio->memory, &fileio->count);
  2219. kfree(fileio);
  2220. dprintk(3, "file io emulator closed\n");
  2221. }
  2222. return 0;
  2223. }
/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointed to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means nonblocking, 0 means blocking calls)
 * @read:	access mode selector (1 means read, 0 means write)
 *
 * Returns the number of bytes transferred, or a negative errno.
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];
		b = q->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		/* On read, only the driver-filled payload may be consumed. */
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
				b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		/* Continue with the partially consumed current buffer. */
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = q->bufs[index];

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(3, "read limit reached\n");
			/* Tears down the emulator; returns 0 (EOF-like). */
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, NULL);
		dprintk(5, "vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that the next
		 * time we need to dequeue a buffer since we've now queued up
		 * all the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
  2371. size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
  2372. loff_t *ppos, int nonblocking)
  2373. {
  2374. return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
  2375. }
  2376. EXPORT_SYMBOL_GPL(vb2_read);
  2377. size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
  2378. loff_t *ppos, int nonblocking)
  2379. {
  2380. return __vb2_perform_fileio(q, (char __user *) data, count,
  2381. ppos, nonblocking, 0);
  2382. }
  2383. EXPORT_SYMBOL_GPL(vb2_write);
/*
 * Per-queue state for the kthread-based I/O emulation
 * (vb2_thread_start()/vb2_thread_stop()).
 */
struct vb2_threadio_data {
	struct task_struct *thread;	/* kthread created by vb2_thread_start() */
	vb2_thread_fnc fnc;		/* callback invoked for each dequeued buffer */
	void *priv;			/* opaque argument passed through to @fnc */
	bool stop;			/* set by vb2_thread_stop() to request exit */
};
  2390. static int vb2_thread(void *data)
  2391. {
  2392. struct vb2_queue *q = data;
  2393. struct vb2_threadio_data *threadio = q->threadio;
  2394. bool copy_timestamp = false;
  2395. unsigned prequeue = 0;
  2396. unsigned index = 0;
  2397. int ret = 0;
  2398. if (q->is_output) {
  2399. prequeue = q->num_buffers;
  2400. copy_timestamp = q->copy_timestamp;
  2401. }
  2402. set_freezable();
  2403. for (;;) {
  2404. struct vb2_buffer *vb;
  2405. /*
  2406. * Call vb2_dqbuf to get buffer back.
  2407. */
  2408. if (prequeue) {
  2409. vb = q->bufs[index++];
  2410. prequeue--;
  2411. } else {
  2412. call_void_qop(q, wait_finish, q);
  2413. if (!threadio->stop)
  2414. ret = vb2_core_dqbuf(q, &index, NULL, 0);
  2415. call_void_qop(q, wait_prepare, q);
  2416. dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
  2417. if (!ret)
  2418. vb = q->bufs[index];
  2419. }
  2420. if (ret || threadio->stop)
  2421. break;
  2422. try_to_freeze();
  2423. if (vb->state != VB2_BUF_STATE_ERROR)
  2424. if (threadio->fnc(vb, threadio->priv))
  2425. break;
  2426. call_void_qop(q, wait_finish, q);
  2427. if (copy_timestamp)
  2428. vb->timestamp = ktime_get_ns();;
  2429. if (!threadio->stop)
  2430. ret = vb2_core_qbuf(q, vb->index, NULL);
  2431. call_void_qop(q, wait_prepare, q);
  2432. if (ret || threadio->stop)
  2433. break;
  2434. }
  2435. /* Hmm, linux becomes *very* unhappy without this ... */
  2436. while (!kthread_should_stop()) {
  2437. set_current_state(TASK_INTERRUPTIBLE);
  2438. schedule();
  2439. }
  2440. return 0;
  2441. }
/*
 * This function should not be used for anything else but the videobuf2-dvb
 * support. If you think you have another good use case for this, then please
 * contact the linux-media mailing list first.
 */
  2447. int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
  2448. const char *thread_name)
  2449. {
  2450. struct vb2_threadio_data *threadio;
  2451. int ret = 0;
  2452. if (q->threadio)
  2453. return -EBUSY;
  2454. if (vb2_is_busy(q))
  2455. return -EBUSY;
  2456. if (WARN_ON(q->fileio))
  2457. return -EBUSY;
  2458. threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
  2459. if (threadio == NULL)
  2460. return -ENOMEM;
  2461. threadio->fnc = fnc;
  2462. threadio->priv = priv;
  2463. ret = __vb2_init_fileio(q, !q->is_output);
  2464. dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
  2465. if (ret)
  2466. goto nomem;
  2467. q->threadio = threadio;
  2468. threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
  2469. if (IS_ERR(threadio->thread)) {
  2470. ret = PTR_ERR(threadio->thread);
  2471. threadio->thread = NULL;
  2472. goto nothread;
  2473. }
  2474. return 0;
  2475. nothread:
  2476. __vb2_cleanup_fileio(q);
  2477. nomem:
  2478. kfree(threadio);
  2479. return ret;
  2480. }
  2481. EXPORT_SYMBOL_GPL(vb2_thread_start);
  2482. int vb2_thread_stop(struct vb2_queue *q)
  2483. {
  2484. struct vb2_threadio_data *threadio = q->threadio;
  2485. int err;
  2486. if (threadio == NULL)
  2487. return 0;
  2488. threadio->stop = true;
  2489. /* Wake up all pending sleeps in the thread */
  2490. vb2_queue_error(q);
  2491. err = kthread_stop(threadio->thread);
  2492. __vb2_cleanup_fileio(q);
  2493. threadio->thread = NULL;
  2494. kfree(threadio);
  2495. q->threadio = NULL;
  2496. return err;
  2497. }
  2498. EXPORT_SYMBOL_GPL(vb2_thread_stop);
/* Module metadata. */
MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");