videobuf2-core.c 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832
  1. /*
  2. * videobuf2-core.c - video buffer 2 core framework
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * The vb2_thread implementation was based on code from videobuf-dvb.c:
  10. * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation.
  15. */
  16. #include <linux/err.h>
  17. #include <linux/kernel.h>
  18. #include <linux/module.h>
  19. #include <linux/mm.h>
  20. #include <linux/poll.h>
  21. #include <linux/slab.h>
  22. #include <linux/sched.h>
  23. #include <linux/freezer.h>
  24. #include <linux/kthread.h>
  25. #include <media/videobuf2-core.h>
  26. #include <trace/events/vb2.h>
  27. static int debug;
  28. module_param(debug, int, 0644);
  29. #define dprintk(level, fmt, arg...) \
  30. do { \
  31. if (debug >= level) \
  32. pr_info("vb2-core: %s: " fmt, __func__, ## arg); \
  33. } while (0)
  34. #ifdef CONFIG_VIDEO_ADV_DEBUG
  35. /*
  36. * If advanced debugging is on, then count how often each op is called
  37. * successfully, which can either be per-buffer or per-queue.
  38. *
  39. * This makes it easy to check that the 'init' and 'cleanup'
  40. * (and variations thereof) stay balanced.
  41. */
  42. #define log_memop(vb, op) \
  43. dprintk(2, "call_memop(%p, %d, %s)%s\n", \
  44. (vb)->vb2_queue, (vb)->index, #op, \
  45. (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
  46. #define call_memop(vb, op, args...) \
  47. ({ \
  48. struct vb2_queue *_q = (vb)->vb2_queue; \
  49. int err; \
  50. \
  51. log_memop(vb, op); \
  52. err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
  53. if (!err) \
  54. (vb)->cnt_mem_ ## op++; \
  55. err; \
  56. })
  57. #define call_ptr_memop(vb, op, args...) \
  58. ({ \
  59. struct vb2_queue *_q = (vb)->vb2_queue; \
  60. void *ptr; \
  61. \
  62. log_memop(vb, op); \
  63. ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
  64. if (!IS_ERR_OR_NULL(ptr)) \
  65. (vb)->cnt_mem_ ## op++; \
  66. ptr; \
  67. })
  68. #define call_void_memop(vb, op, args...) \
  69. ({ \
  70. struct vb2_queue *_q = (vb)->vb2_queue; \
  71. \
  72. log_memop(vb, op); \
  73. if (_q->mem_ops->op) \
  74. _q->mem_ops->op(args); \
  75. (vb)->cnt_mem_ ## op++; \
  76. })
  77. #define log_qop(q, op) \
  78. dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
  79. (q)->ops->op ? "" : " (nop)")
  80. #define call_qop(q, op, args...) \
  81. ({ \
  82. int err; \
  83. \
  84. log_qop(q, op); \
  85. err = (q)->ops->op ? (q)->ops->op(args) : 0; \
  86. if (!err) \
  87. (q)->cnt_ ## op++; \
  88. err; \
  89. })
  90. #define call_void_qop(q, op, args...) \
  91. ({ \
  92. log_qop(q, op); \
  93. if ((q)->ops->op) \
  94. (q)->ops->op(args); \
  95. (q)->cnt_ ## op++; \
  96. })
  97. #define log_vb_qop(vb, op, args...) \
  98. dprintk(2, "call_vb_qop(%p, %d, %s)%s\n", \
  99. (vb)->vb2_queue, (vb)->index, #op, \
  100. (vb)->vb2_queue->ops->op ? "" : " (nop)")
  101. #define call_vb_qop(vb, op, args...) \
  102. ({ \
  103. int err; \
  104. \
  105. log_vb_qop(vb, op); \
  106. err = (vb)->vb2_queue->ops->op ? \
  107. (vb)->vb2_queue->ops->op(args) : 0; \
  108. if (!err) \
  109. (vb)->cnt_ ## op++; \
  110. err; \
  111. })
  112. #define call_void_vb_qop(vb, op, args...) \
  113. ({ \
  114. log_vb_qop(vb, op); \
  115. if ((vb)->vb2_queue->ops->op) \
  116. (vb)->vb2_queue->ops->op(args); \
  117. (vb)->cnt_ ## op++; \
  118. })
  119. #else
  120. #define call_memop(vb, op, args...) \
  121. ((vb)->vb2_queue->mem_ops->op ? \
  122. (vb)->vb2_queue->mem_ops->op(args) : 0)
  123. #define call_ptr_memop(vb, op, args...) \
  124. ((vb)->vb2_queue->mem_ops->op ? \
  125. (vb)->vb2_queue->mem_ops->op(args) : NULL)
  126. #define call_void_memop(vb, op, args...) \
  127. do { \
  128. if ((vb)->vb2_queue->mem_ops->op) \
  129. (vb)->vb2_queue->mem_ops->op(args); \
  130. } while (0)
  131. #define call_qop(q, op, args...) \
  132. ((q)->ops->op ? (q)->ops->op(args) : 0)
  133. #define call_void_qop(q, op, args...) \
  134. do { \
  135. if ((q)->ops->op) \
  136. (q)->ops->op(args); \
  137. } while (0)
  138. #define call_vb_qop(vb, op, args...) \
  139. ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
  140. #define call_void_vb_qop(vb, op, args...) \
  141. do { \
  142. if ((vb)->vb2_queue->ops->op) \
  143. (vb)->vb2_queue->ops->op(args); \
  144. } while (0)
  145. #endif
  146. #define call_bufop(q, op, args...) \
  147. ({ \
  148. int ret = 0; \
  149. if (q && q->buf_ops && q->buf_ops->op) \
  150. ret = q->buf_ops->op(args); \
  151. ret; \
  152. })
  153. #define call_void_bufop(q, op, args...) \
  154. ({ \
  155. if (q && q->buf_ops && q->buf_ops->op) \
  156. q->buf_ops->op(args); \
  157. })
  158. static void __vb2_queue_cancel(struct vb2_queue *q);
  159. static void __enqueue_in_driver(struct vb2_buffer *vb);
/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 *
 * Allocates page-aligned memory for every plane of @vb through the queue's
 * mem_ops->alloc callback and stores the allocator-private handle in each
 * plane. If any plane allocation fails, all planes allocated so far are
 * released again, leaving the buffer with no memory attached.
 *
 * Returns 0 on success or -ENOMEM if any plane allocation failed.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	/* DMA direction follows the queue: output queues feed the device */
	enum dma_data_direction dma_dir =
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
				      size, dma_dir, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}
  192. /**
  193. * __vb2_buf_mem_free() - free memory of the given buffer
  194. */
  195. static void __vb2_buf_mem_free(struct vb2_buffer *vb)
  196. {
  197. unsigned int plane;
  198. for (plane = 0; plane < vb->num_planes; ++plane) {
  199. call_void_memop(vb, put, vb->planes[plane].mem_priv);
  200. vb->planes[plane].mem_priv = NULL;
  201. dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
  202. }
  203. }
  204. /**
  205. * __vb2_buf_userptr_put() - release userspace memory associated with
  206. * a USERPTR buffer
  207. */
  208. static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
  209. {
  210. unsigned int plane;
  211. for (plane = 0; plane < vb->num_planes; ++plane) {
  212. if (vb->planes[plane].mem_priv)
  213. call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
  214. vb->planes[plane].mem_priv = NULL;
  215. }
  216. }
/**
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 *
 * Tears down a single plane's dma-buf state: unmap the kernel mapping (only
 * if one was established), detach from the dma-buf, then drop the dma-buf
 * reference taken when the plane was attached. Safe to call on a plane that
 * was never attached (mem_priv == NULL). The unmap -> detach -> put order
 * must be preserved.
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	/* Reset all dma-buf bookkeeping so the plane can be reattached */
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}
  233. /**
  234. * __vb2_buf_dmabuf_put() - release memory associated with
  235. * a DMABUF shared buffer
  236. */
  237. static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
  238. {
  239. unsigned int plane;
  240. for (plane = 0; plane < vb->num_planes; ++plane)
  241. __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
  242. }
/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 *
 * The offsets are handed to userspace as mmap() cookies. Each plane gets a
 * page-aligned offset continuing right after the previous buffer's last
 * plane, so every plane on the queue ends up with a distinct,
 * non-overlapping offset.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long off = 0;

	/* Not the first buffer: continue after the previous buffer's last plane */
	if (vb->index) {
		struct vb2_buffer *prev = q->bufs[vb->index - 1];
		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];

		off = PAGE_ALIGN(p->m.offset + p->length);
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = off;

		dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
				vb->index, plane, off);

		/* Advance past this plane, keeping the next offset page aligned */
		off += vb->planes[plane].length;
		off = PAGE_ALIGN(off);
	}
}
/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * For each buffer: allocate the vb2_buffer struct, initialize its planes
 * from @plane_sizes, and register it in q->bufs. For MMAP buffers also
 * allocate the video memory, assign mmap offsets and run the driver's
 * optional buf_init callback; any failure rolls back that buffer and stops.
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int num_buffers, unsigned int num_planes,
		const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		/* New buffers are appended after those already on the queue */
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "failed allocating memory for "
						"buffer %d\n", buffer);
				/* Undo the partial registration before bailing */
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	/* 'buffer' holds how many buffers were fully set up before any failure */
	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
  328. /**
  329. * __vb2_free_mem() - release all video buffer memory for a given queue
  330. */
  331. static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
  332. {
  333. unsigned int buffer;
  334. struct vb2_buffer *vb;
  335. for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
  336. ++buffer) {
  337. vb = q->bufs[buffer];
  338. if (!vb)
  339. continue;
  340. /* Free MMAP buffers or release USERPTR buffers */
  341. if (q->memory == VB2_MEMORY_MMAP)
  342. __vb2_buf_mem_free(vb);
  343. else if (q->memory == VB2_MEMORY_DMABUF)
  344. __vb2_buf_dmabuf_put(vb);
  345. else
  346. __vb2_buf_userptr_put(vb);
  347. }
  348. }
/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 *
 * Returns 0 on success or -EAGAIN if a buffer is still being prepared and
 * therefore cannot be freed yet.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		/* Only buffers that actually got memory attached need cleanup */
		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2: wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		/* Reset the per-queue counters for the next streaming cycle */
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		/* Each init/alloc/map style op must match its teardown op */
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2: buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2: buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2: alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2: get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2: attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2: get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		/* No buffers left: return the queue to its unconfigured state */
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
  455. /**
  456. * vb2_buffer_in_use() - return true if the buffer is in use and
  457. * the queue cannot be freed (by the means of REQBUFS(0)) call
  458. */
  459. bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
  460. {
  461. unsigned int plane;
  462. for (plane = 0; plane < vb->num_planes; ++plane) {
  463. void *mem_priv = vb->planes[plane].mem_priv;
  464. /*
  465. * If num_users() has not been provided, call_memop
  466. * will return 0, apparently nobody cares about this
  467. * case anyway. If num_users() returns more than 1,
  468. * we are not the only user of the plane's memory.
  469. */
  470. if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
  471. return true;
  472. }
  473. return false;
  474. }
  475. EXPORT_SYMBOL(vb2_buffer_in_use);
  476. /**
  477. * __buffers_in_use() - return true if any buffers on the queue are in use and
  478. * the queue cannot be freed (by the means of REQBUFS(0)) call
  479. */
  480. static bool __buffers_in_use(struct vb2_queue *q)
  481. {
  482. unsigned int buffer;
  483. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  484. if (vb2_buffer_in_use(q, q->bufs[buffer]))
  485. return true;
  486. }
  487. return false;
  488. }
/**
 * vb2_core_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @index:	id number of the buffer
 * @pb:		buffer struct passed from userspace
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * The passed buffer should have been verified.
 * This function fills the relevant information for the userspace.
 */
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	/* Delegate the uAPI-specific filling of @pb to the queue's buf_ops */
	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);
  504. /**
  505. * __verify_userptr_ops() - verify that all memory operations required for
  506. * USERPTR queue type have been provided
  507. */
  508. static int __verify_userptr_ops(struct vb2_queue *q)
  509. {
  510. if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
  511. !q->mem_ops->put_userptr)
  512. return -EINVAL;
  513. return 0;
  514. }
  515. /**
  516. * __verify_mmap_ops() - verify that all memory operations required for
  517. * MMAP queue type have been provided
  518. */
  519. static int __verify_mmap_ops(struct vb2_queue *q)
  520. {
  521. if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
  522. !q->mem_ops->put || !q->mem_ops->mmap)
  523. return -EINVAL;
  524. return 0;
  525. }
  526. /**
  527. * __verify_dmabuf_ops() - verify that all memory operations required for
  528. * DMABUF queue type have been provided
  529. */
  530. static int __verify_dmabuf_ops(struct vb2_queue *q)
  531. {
  532. if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
  533. !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
  534. !q->mem_ops->unmap_dmabuf)
  535. return -EINVAL;
  536. return 0;
  537. }
  538. /**
  539. * vb2_verify_memory_type() - Check whether the memory type and buffer type
  540. * passed to a buffer operation are compatible with the queue.
  541. */
  542. int vb2_verify_memory_type(struct vb2_queue *q,
  543. enum vb2_memory memory, unsigned int type)
  544. {
  545. if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
  546. memory != VB2_MEMORY_DMABUF) {
  547. dprintk(1, "unsupported memory type\n");
  548. return -EINVAL;
  549. }
  550. if (type != q->type) {
  551. dprintk(1, "requested type is incorrect\n");
  552. return -EINVAL;
  553. }
  554. /*
  555. * Make sure all the required memory ops for given memory type
  556. * are available.
  557. */
  558. if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
  559. dprintk(1, "MMAP for current setup unsupported\n");
  560. return -EINVAL;
  561. }
  562. if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
  563. dprintk(1, "USERPTR for current setup unsupported\n");
  564. return -EINVAL;
  565. }
  566. if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
  567. dprintk(1, "DMABUF for current setup unsupported\n");
  568. return -EINVAL;
  569. }
  570. /*
  571. * Place the busy tests at the end: -EBUSY can be ignored when
  572. * create_bufs is called with count == 0, but count == 0 should still
  573. * do the memory and type validation.
  574. */
  575. if (vb2_fileio_is_active(q)) {
  576. dprintk(1, "file io in progress\n");
  577. return -EBUSY;
  578. }
  579. return 0;
  580. }
  581. EXPORT_SYMBOL(vb2_verify_memory_type);
/**
 * vb2_core_reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @memory:	memory type
 * @count:	requested buffer count
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
 * and the queue is not busy, memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	/* Buffers cannot be freed/reallocated while streaming is active. */
	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 || q->memory != memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
			mutex_unlock(&q->mmap_lock);
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 * Clamp to [min_buffers_needed, VB2_MAX_FRAME].
	 */
	num_buffers = min_t(unsigned int, *count, VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/* q->num_buffers is updated under mmap_lock, same as the free path. */
	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
/**
 * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:			videobuf2 queue
 * @memory:		memory type
 * @count:		requested buffer count
 * @requested_planes:	number of planes requested by userspace
 * @requested_sizes:	per-plane sizes requested by userspace
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count, unsigned requested_planes,
		const unsigned requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	/* First allocation on this queue: latch memory type here. */
	if (!q->num_buffers) {
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = memory;
		q->waiting_for_buffers = !q->is_output;
	}

	/* Never exceed VB2_MAX_FRAME buffers in total on the queue. */
	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
				num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		/*
		 * NOTE(review): unlike vb2_core_reqbufs(), a queue_setup()
		 * failure here is reported as -ENOMEM rather than the
		 * callback's own error code — confirm this is intentional.
		 */
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
  804. /**
  805. * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
  806. * @vb: vb2_buffer to which the plane in question belongs to
  807. * @plane_no: plane number for which the address is to be returned
  808. *
  809. * This function returns a kernel virtual address of a given plane if
  810. * such a mapping exist, NULL otherwise.
  811. */
  812. void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
  813. {
  814. if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
  815. return NULL;
  816. return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
  817. }
  818. EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
  819. /**
  820. * vb2_plane_cookie() - Return allocator specific cookie for the given plane
  821. * @vb: vb2_buffer to which the plane in question belongs to
  822. * @plane_no: plane number for which the cookie is to be returned
  823. *
  824. * This function returns an allocator specific cookie for a given plane if
  825. * available, NULL otherwise. The allocator should provide some simple static
  826. * inline function, which would convert this cookie to the allocator specific
  827. * type that can be used directly by the driver to access the buffer. This can
  828. * be for example physical address, pointer to scatter list or IOMMU mapping.
  829. */
  830. void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
  831. {
  832. if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
  833. return NULL;
  834. return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
  835. }
  836. EXPORT_SYMBOL_GPL(vb2_plane_cookie);
/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully,
 *		VB2_BUF_STATE_ERROR if the operation finished with an error or
 *		VB2_BUF_STATE_QUEUED if the driver wants to requeue buffers.
 *		If start_streaming fails then it should return buffers with state
 *		VB2_BUF_STATE_QUEUED to put them back into the queue.
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace. The driver
 * cannot use this buffer anymore until it is queued back to it by videobuf
 * by the means of buf_queue callback. Only buffers previously queued to the
 * driver by buf_queue can be passed to this function.
 *
 * While streaming a buffer can only be returned in state DONE or ERROR.
 * The start_streaming op can also return them in case the DMA engine cannot
 * be started for some reason. In that case the buffers should be returned with
 * state QUEUED.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	/* Only buffers currently owned by the driver may be completed. */
	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	/* Coerce any unexpected target state to ERROR rather than trusting it. */
	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED &&
		    state != VB2_BUF_STATE_REQUEUEING))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "done processing on buffer %d, state: %d\n",
			vb->index, state);

	/* sync buffers: hand plane memory back from the device to the CPU */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);

	/* done_list and vb->state transitions are protected by done_lock. */
	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED ||
	    state == VB2_BUF_STATE_REQUEUEING) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	case VB2_BUF_STATE_REQUEUEING:
		/* Re-submit to the driver immediately if streaming started. */
		if (q->start_streaming_called)
			__enqueue_in_driver(vb);
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
  907. /**
  908. * vb2_discard_done() - discard all buffers marked as DONE
  909. * @q: videobuf2 queue
  910. *
  911. * This function is intended to be used with suspend/resume operations. It
  912. * discards all 'done' buffers as they would be too old to be requested after
  913. * resume.
  914. *
  915. * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  916. * delayed works before calling this function to make sure no buffer will be
  917. * touched by the driver and/or hardware.
  918. */
  919. void vb2_discard_done(struct vb2_queue *q)
  920. {
  921. struct vb2_buffer *vb;
  922. unsigned long flags;
  923. spin_lock_irqsave(&q->done_lock, flags);
  924. list_for_each_entry(vb, &q->done_list, done_entry)
  925. vb->state = VB2_BUF_STATE_ERROR;
  926. spin_unlock_irqrestore(&q->done_lock, flags);
  927. }
  928. EXPORT_SYMBOL_GPL(vb2_discard_done);
  929. /**
  930. * __qbuf_mmap() - handle qbuf of an MMAP buffer
  931. */
  932. static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
  933. {
  934. int ret = 0;
  935. if (pb)
  936. ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
  937. vb, pb, vb->planes);
  938. return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
  939. }
/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 *
 * Verifies the userspace-provided plane addresses/lengths against what is
 * currently mapped; any plane whose address or length changed is released
 * and re-acquired via get_userptr. When at least one plane changed (or the
 * buffer had no memory yet), buf_cleanup/buf_init are rerun around the
 * re-acquisition before buf_prepare is called.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	enum dma_data_direction dma_dir =
		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	/* No plane 0 memory means the whole buffer is being (re)acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb)
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						vb->planes[plane].min_length,
						plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			/*
			 * buf_cleanup must run exactly once before the first
			 * plane of an already-initialized buffer is replaced.
			 */
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		/* Reset the plane before attempting the new acquisition. */
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, dma_dir);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
				vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}
  1044. /**
  1045. * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
  1046. */
  1047. static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
  1048. {
  1049. struct vb2_plane planes[VB2_MAX_PLANES];
  1050. struct vb2_queue *q = vb->vb2_queue;
  1051. void *mem_priv;
  1052. unsigned int plane;
  1053. int ret = 0;
  1054. enum dma_data_direction dma_dir =
  1055. q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  1056. bool reacquired = vb->planes[0].mem_priv == NULL;
  1057. memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
  1058. /* Copy relevant information provided by the userspace */
  1059. if (pb)
  1060. ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
  1061. vb, pb, planes);
  1062. if (ret)
  1063. return ret;
  1064. for (plane = 0; plane < vb->num_planes; ++plane) {
  1065. struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
  1066. if (IS_ERR_OR_NULL(dbuf)) {
  1067. dprintk(1, "invalid dmabuf fd for plane %d\n",
  1068. plane);
  1069. ret = -EINVAL;
  1070. goto err;
  1071. }
  1072. /* use DMABUF size if length is not provided */
  1073. if (planes[plane].length == 0)
  1074. planes[plane].length = dbuf->size;
  1075. if (planes[plane].length < vb->planes[plane].min_length) {
  1076. dprintk(1, "invalid dmabuf length for plane %d\n",
  1077. plane);
  1078. ret = -EINVAL;
  1079. goto err;
  1080. }
  1081. /* Skip the plane if already verified */
  1082. if (dbuf == vb->planes[plane].dbuf &&
  1083. vb->planes[plane].length == planes[plane].length) {
  1084. dma_buf_put(dbuf);
  1085. continue;
  1086. }
  1087. dprintk(1, "buffer for plane %d changed\n", plane);
  1088. if (!reacquired) {
  1089. reacquired = true;
  1090. call_void_vb_qop(vb, buf_cleanup, vb);
  1091. }
  1092. /* Release previously acquired memory if present */
  1093. __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
  1094. vb->planes[plane].bytesused = 0;
  1095. vb->planes[plane].length = 0;
  1096. vb->planes[plane].m.fd = 0;
  1097. vb->planes[plane].data_offset = 0;
  1098. /* Acquire each plane's memory */
  1099. mem_priv = call_ptr_memop(vb, attach_dmabuf,
  1100. q->alloc_ctx[plane], dbuf, planes[plane].length,
  1101. dma_dir);
  1102. if (IS_ERR(mem_priv)) {
  1103. dprintk(1, "failed to attach dmabuf\n");
  1104. ret = PTR_ERR(mem_priv);
  1105. dma_buf_put(dbuf);
  1106. goto err;
  1107. }
  1108. vb->planes[plane].dbuf = dbuf;
  1109. vb->planes[plane].mem_priv = mem_priv;
  1110. }
  1111. /* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
  1112. * really we want to do this just before the DMA, not while queueing
  1113. * the buffer(s)..
  1114. */
  1115. for (plane = 0; plane < vb->num_planes; ++plane) {
  1116. ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
  1117. if (ret) {
  1118. dprintk(1, "failed to map dmabuf for plane %d\n",
  1119. plane);
  1120. goto err;
  1121. }
  1122. vb->planes[plane].dbuf_mapped = 1;
  1123. }
  1124. /*
  1125. * Now that everything is in order, copy relevant information
  1126. * provided by userspace.
  1127. */
  1128. for (plane = 0; plane < vb->num_planes; ++plane) {
  1129. vb->planes[plane].bytesused = planes[plane].bytesused;
  1130. vb->planes[plane].length = planes[plane].length;
  1131. vb->planes[plane].m.fd = planes[plane].m.fd;
  1132. vb->planes[plane].data_offset = planes[plane].data_offset;
  1133. }
  1134. if (reacquired) {
  1135. /*
  1136. * Call driver-specific initialization on the newly acquired buffer,
  1137. * if provided.
  1138. */
  1139. ret = call_vb_qop(vb, buf_init, vb);
  1140. if (ret) {
  1141. dprintk(1, "buffer initialization failed\n");
  1142. goto err;
  1143. }
  1144. }
  1145. ret = call_vb_qop(vb, buf_prepare, vb);
  1146. if (ret) {
  1147. dprintk(1, "buffer preparation failed\n");
  1148. call_void_vb_qop(vb, buf_cleanup, vb);
  1149. goto err;
  1150. }
  1151. return 0;
  1152. err:
  1153. /* In case of errors, release planes that were already acquired */
  1154. __vb2_buf_dmabuf_put(vb);
  1155. return ret;
  1156. }
  1157. /**
  1158. * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  1159. */
  1160. static void __enqueue_in_driver(struct vb2_buffer *vb)
  1161. {
  1162. struct vb2_queue *q = vb->vb2_queue;
  1163. unsigned int plane;
  1164. vb->state = VB2_BUF_STATE_ACTIVE;
  1165. atomic_inc(&q->owned_by_drv_count);
  1166. trace_vb2_buf_queue(q, vb);
  1167. /* sync buffers */
  1168. for (plane = 0; plane < vb->num_planes; ++plane)
  1169. call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
  1170. call_void_vb_qop(vb, buf_queue, vb);
  1171. }
  1172. static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
  1173. {
  1174. struct vb2_queue *q = vb->vb2_queue;
  1175. int ret;
  1176. if (q->error) {
  1177. dprintk(1, "fatal error occurred on queue\n");
  1178. return -EIO;
  1179. }
  1180. vb->state = VB2_BUF_STATE_PREPARING;
  1181. switch (q->memory) {
  1182. case VB2_MEMORY_MMAP:
  1183. ret = __qbuf_mmap(vb, pb);
  1184. break;
  1185. case VB2_MEMORY_USERPTR:
  1186. ret = __qbuf_userptr(vb, pb);
  1187. break;
  1188. case VB2_MEMORY_DMABUF:
  1189. ret = __qbuf_dmabuf(vb, pb);
  1190. break;
  1191. default:
  1192. WARN(1, "Invalid queue type\n");
  1193. ret = -EINVAL;
  1194. }
  1195. if (ret)
  1196. dprintk(1, "buffer preparation failed: %d\n", ret);
  1197. vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
  1198. return ret;
  1199. }
  1200. /**
  1201. * vb2_core_prepare_buf() - Pass ownership of a buffer from userspace
  1202. * to the kernel
  1203. * @q: videobuf2 queue
  1204. * @index: id number of the buffer
  1205. * @pb: buffer structure passed from userspace to vidioc_prepare_buf
  1206. * handler in driver
  1207. *
  1208. * Should be called from vidioc_prepare_buf ioctl handler of a driver.
  1209. * The passed buffer should have been verified.
  1210. * This function calls buf_prepare callback in the driver (if provided),
  1211. * in which driver-specific buffer initialization can be performed,
  1212. *
  1213. * The return values from this function are intended to be directly returned
  1214. * from vidioc_prepare_buf handler in driver.
  1215. */
  1216. int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
  1217. {
  1218. struct vb2_buffer *vb;
  1219. int ret;
  1220. vb = q->bufs[index];
  1221. if (vb->state != VB2_BUF_STATE_DEQUEUED) {
  1222. dprintk(1, "invalid buffer state %d\n",
  1223. vb->state);
  1224. return -EINVAL;
  1225. }
  1226. ret = __buf_prepare(vb, pb);
  1227. if (ret)
  1228. return ret;
  1229. /* Fill buffer information for the userspace */
  1230. call_void_bufop(q, fill_user_buffer, vb, pb);
  1231. dprintk(1, "prepare of buffer %d succeeded\n", vb->index);
  1232. return ret;
  1233. }
  1234. EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
/**
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	/* start_streaming failed: undo the flag set optimistically above. */
	q->start_streaming_called = 0;

	dprintk(1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information how buffers
	 * should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
/**
 * vb2_core_qbuf() - Queue a buffer from userspace
 * @q:		videobuf2 queue
 * @index:	id number of the buffer
 * @pb:		buffer structure passed from userspace to vidioc_qbuf handler
 *		in driver
 *
 * Should be called from vidioc_qbuf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function:
 * 1) if necessary, calls buf_prepare callback in the driver (if provided), in
 *    which driver-specific buffer initialization can be performed,
 * 2) if streaming is on, queues the buffer in driver by the means of buf_queue
 *    callback for processing.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_qbuf handler in driver.
 */
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		/* Not yet prepared (QBUF without a prior PREPARE_BUF). */
		ret = __buf_prepare(vb, pb);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "invalid buffer state %d\n", vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(1, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * Returns 0 once done_list is non-empty; otherwise -EINVAL (not streaming),
 * -EIO (queue error), -EPIPE (last buffer already dequeued), -EAGAIN
 * (nonblocking and nothing ready) or the wait's interruption status.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		/* Re-check all exit conditions each time the lock is retaken. */
		if (!q->streaming) {
			dprintk(1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
/**
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 * @q:		videobuf2 queue
 * @vb:		set to the first buffer on the done_list on success
 * @nonblocking: if false, may sleep in __vb2_wait_for_done_vb() until a
 *		buffer becomes available
 *
 * Will sleep if required for nonblocking == false.
 *
 * Returns 0 and fills *@vb on success, or the error propagated from
 * __vb2_wait_for_done_vb() (e.g. -EAGAIN when nonblocking and empty).
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if v4l2_buffer can handle all
	 * the planes.
	 * Verifying planes is NOT necessary since it already has been checked
	 * before the buffer is queued/prepared. So it can never fail.
	 */
	list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* ret is 0 here: __vb2_wait_for_done_vb() succeeded above */
	return ret;
}
  1464. /**
  1465. * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
  1466. * @q: videobuf2 queue
  1467. *
  1468. * This function will wait until all buffers that have been given to the driver
  1469. * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
  1470. * wait_prepare, wait_finish pair. It is intended to be called with all locks
  1471. * taken, for example from stop_streaming() callback.
  1472. */
  1473. int vb2_wait_for_all_buffers(struct vb2_queue *q)
  1474. {
  1475. if (!q->streaming) {
  1476. dprintk(1, "streaming off, will not wait for buffers\n");
  1477. return -EINVAL;
  1478. }
  1479. if (q->start_streaming_called)
  1480. wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
  1481. return 0;
  1482. }
  1483. EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
  1484. /**
  1485. * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
  1486. */
  1487. static void __vb2_dqbuf(struct vb2_buffer *vb)
  1488. {
  1489. struct vb2_queue *q = vb->vb2_queue;
  1490. unsigned int i;
  1491. /* nothing to do if the buffer is already dequeued */
  1492. if (vb->state == VB2_BUF_STATE_DEQUEUED)
  1493. return;
  1494. vb->state = VB2_BUF_STATE_DEQUEUED;
  1495. /* unmap DMABUF buffer */
  1496. if (q->memory == VB2_MEMORY_DMABUF)
  1497. for (i = 0; i < vb->num_planes; ++i) {
  1498. if (!vb->planes[i].dbuf_mapped)
  1499. continue;
  1500. call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
  1501. vb->planes[i].dbuf_mapped = 0;
  1502. }
  1503. }
/**
 * vb2_core_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @pindex:	if non-NULL, set to the index of the dequeued buffer
 * @pb:		buffer structure passed from userspace to vidioc_dqbuf handler
 *		in driver; may be NULL (e.g. for in-kernel file io emulation)
 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
 *		buffers ready for dequeuing are present. Normally the driver
 *		would be passing (file->f_flags & O_NONBLOCK) here
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * The passed buffer should have been verified.
 * This function:
 * 1) calls buf_finish callback in the driver (if provided), in which
 *    driver can perform any additional operations that may be required before
 *    returning the buffer to userspace, such as cache sync,
 * 2) the buffer struct members are filled with relevant information for
 *    the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_dqbuf handler in driver.
 */
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	/* Wait for (or immediately fetch) a buffer from the done_list. */
	ret = __vb2_get_done_vb(q, &vb, nonblocking);
	if (ret < 0)
		return ret;

	/* Only DONE and ERROR buffers may legally be on the done_list. */
	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "invalid buffer state\n");
		return -EINVAL;
	}

	/*
	 * buf_finish must run before fill_user_buffer below: the driver may
	 * still adjust buffer contents/metadata (e.g. cache sync) here.
	 */
	call_void_vb_qop(vb, buf_finish, vb);

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
		vb->index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 * @q:		videobuf2 queue
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 *
 * The order below matters: the driver is stopped first, then queue state is
 * reset, and finally each buffer is transitioned back to DEQUEUED.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information how buffers should be returned
	 * to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		/* Forcibly reclaim buffers the driver failed to give back. */
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	/* Wake anyone sleeping in __vb2_wait_for_done_vb() or poll(). */
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}
  1622. int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
  1623. {
  1624. int ret;
  1625. if (type != q->type) {
  1626. dprintk(1, "invalid stream type\n");
  1627. return -EINVAL;
  1628. }
  1629. if (q->streaming) {
  1630. dprintk(3, "already streaming\n");
  1631. return 0;
  1632. }
  1633. if (!q->num_buffers) {
  1634. dprintk(1, "no buffers have been allocated\n");
  1635. return -EINVAL;
  1636. }
  1637. if (q->num_buffers < q->min_buffers_needed) {
  1638. dprintk(1, "need at least %u allocated buffers\n",
  1639. q->min_buffers_needed);
  1640. return -EINVAL;
  1641. }
  1642. /*
  1643. * Tell driver to start streaming provided sufficient buffers
  1644. * are available.
  1645. */
  1646. if (q->queued_count >= q->min_buffers_needed) {
  1647. ret = vb2_start_streaming(q);
  1648. if (ret) {
  1649. __vb2_queue_cancel(q);
  1650. return ret;
  1651. }
  1652. }
  1653. q->streaming = 1;
  1654. dprintk(3, "successful\n");
  1655. return 0;
  1656. }
  1657. EXPORT_SYMBOL_GPL(vb2_core_streamon);
/**
 * vb2_queue_error() - signal a fatal error on the queue
 * @q:		videobuf2 queue
 *
 * Flag that a fatal unrecoverable error has occurred and wake up all processes
 * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
 * buffers will return -EIO.
 *
 * The error flag will be cleared when cancelling the queue, either from
 * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
 * function before starting the stream, otherwise the error flag will remain set
 * until the queue is released when closing the device node.
 */
void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	/* Wake all sleepers so they can observe q->error and bail out. */
	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
/**
 * vb2_core_streamoff() - stop streaming on a queue
 * @q:		videobuf2 queue
 * @type:	buffer type, must match q->type
 *
 * Returns 0 on success, -EINVAL when @type does not match the queue type.
 */
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);

	/* Capture queues start out waiting for buffers again. */
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
  1699. /**
  1700. * __find_plane_by_offset() - find plane associated with the given offset off
  1701. */
  1702. static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
  1703. unsigned int *_buffer, unsigned int *_plane)
  1704. {
  1705. struct vb2_buffer *vb;
  1706. unsigned int buffer, plane;
  1707. /*
  1708. * Go over all buffers and their planes, comparing the given offset
  1709. * with an offset assigned to each plane. If a match is found,
  1710. * return its buffer and plane numbers.
  1711. */
  1712. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  1713. vb = q->bufs[buffer];
  1714. for (plane = 0; plane < vb->num_planes; ++plane) {
  1715. if (vb->planes[plane].m.offset == off) {
  1716. *_buffer = buffer;
  1717. *_plane = plane;
  1718. return 0;
  1719. }
  1720. }
  1721. }
  1722. return -EINVAL;
  1723. }
/**
 * vb2_core_expbuf() - Export a buffer as a file descriptor
 * @q:		videobuf2 queue
 * @fd:		file descriptor associated with DMABUF (set by driver)
 * @type:	buffer type
 * @index:	id number of the buffer
 * @plane:	index of the plane to be exported, 0 for single plane queues
 * @flags:	flags for newly created file, currently only O_CLOEXEC is
 *		supported, refer to manual of open syscall for more details
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_expbuf handler in driver.
 */
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		    unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	/* Exporting only makes sense for MMAP queues with get_dmabuf support. */
	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	/* Only O_CLOEXEC plus an access mode (O_RDONLY/O_WRONLY/O_RDWR). */
	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[index];

	if (plane >= vb->num_planes) {
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	/* Ask the allocator for a dma_buf wrapping this plane's memory. */
	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
				flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(1, "failed to export buffer %d, plane %d\n",
			index, plane);
		return -EINVAL;
	}

	/* Install an fd; on failure drop the reference we were handed. */
	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
		index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);
  1794. /**
  1795. * vb2_mmap() - map video buffers into application address space
  1796. * @q: videobuf2 queue
  1797. * @vma: vma passed to the mmap file operation handler in the driver
  1798. *
  1799. * Should be called from mmap file operation handler of a driver.
  1800. * This function maps one plane of one of the available video buffers to
  1801. * userspace. To map whole video memory allocated on reqbufs, this function
  1802. * has to be called once per each plane per each buffer previously allocated.
  1803. *
  1804. * When the userspace application calls mmap, it passes to it an offset returned
  1805. * to it earlier by the means of vidioc_querybuf handler. That offset acts as
  1806. * a "cookie", which is then used to identify the plane to be mapped.
  1807. * This function finds a plane with a matching offset and a mapping is performed
  1808. * by the means of a provided memory operation.
  1809. *
  1810. * The return values from this function are intended to be directly returned
  1811. * from the mmap handler in driver.
  1812. */
  1813. int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
  1814. {
  1815. unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
  1816. struct vb2_buffer *vb;
  1817. unsigned int buffer = 0, plane = 0;
  1818. int ret;
  1819. unsigned long length;
  1820. if (q->memory != VB2_MEMORY_MMAP) {
  1821. dprintk(1, "queue is not currently set up for mmap\n");
  1822. return -EINVAL;
  1823. }
  1824. /*
  1825. * Check memory area access mode.
  1826. */
  1827. if (!(vma->vm_flags & VM_SHARED)) {
  1828. dprintk(1, "invalid vma flags, VM_SHARED needed\n");
  1829. return -EINVAL;
  1830. }
  1831. if (q->is_output) {
  1832. if (!(vma->vm_flags & VM_WRITE)) {
  1833. dprintk(1, "invalid vma flags, VM_WRITE needed\n");
  1834. return -EINVAL;
  1835. }
  1836. } else {
  1837. if (!(vma->vm_flags & VM_READ)) {
  1838. dprintk(1, "invalid vma flags, VM_READ needed\n");
  1839. return -EINVAL;
  1840. }
  1841. }
  1842. if (vb2_fileio_is_active(q)) {
  1843. dprintk(1, "mmap: file io in progress\n");
  1844. return -EBUSY;
  1845. }
  1846. /*
  1847. * Find the plane corresponding to the offset passed by userspace.
  1848. */
  1849. ret = __find_plane_by_offset(q, off, &buffer, &plane);
  1850. if (ret)
  1851. return ret;
  1852. vb = q->bufs[buffer];
  1853. /*
  1854. * MMAP requires page_aligned buffers.
  1855. * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
  1856. * so, we need to do the same here.
  1857. */
  1858. length = PAGE_ALIGN(vb->planes[plane].length);
  1859. if (length < (vma->vm_end - vma->vm_start)) {
  1860. dprintk(1,
  1861. "MMAP invalid, as it would overflow buffer length\n");
  1862. return -EINVAL;
  1863. }
  1864. mutex_lock(&q->mmap_lock);
  1865. ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
  1866. mutex_unlock(&q->mmap_lock);
  1867. if (ret)
  1868. return ret;
  1869. dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
  1870. return 0;
  1871. }
  1872. EXPORT_SYMBOL_GPL(vb2_mmap);
  1873. #ifndef CONFIG_MMU
  1874. unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
  1875. unsigned long addr,
  1876. unsigned long len,
  1877. unsigned long pgoff,
  1878. unsigned long flags)
  1879. {
  1880. unsigned long off = pgoff << PAGE_SHIFT;
  1881. struct vb2_buffer *vb;
  1882. unsigned int buffer, plane;
  1883. void *vaddr;
  1884. int ret;
  1885. if (q->memory != VB2_MEMORY_MMAP) {
  1886. dprintk(1, "queue is not currently set up for mmap\n");
  1887. return -EINVAL;
  1888. }
  1889. /*
  1890. * Find the plane corresponding to the offset passed by userspace.
  1891. */
  1892. ret = __find_plane_by_offset(q, off, &buffer, &plane);
  1893. if (ret)
  1894. return ret;
  1895. vb = q->bufs[buffer];
  1896. vaddr = vb2_plane_vaddr(vb, plane);
  1897. return vaddr ? (unsigned long)vaddr : -EINVAL;
  1898. }
  1899. EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
  1900. #endif
  1901. /**
  1902. * vb2_core_queue_init() - initialize a videobuf2 queue
  1903. * @q: videobuf2 queue; this structure should be allocated in driver
  1904. *
  1905. * The vb2_queue structure should be allocated by the driver. The driver is
  1906. * responsible of clearing it's content and setting initial values for some
  1907. * required entries before calling this function.
  1908. * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
  1909. * to the struct vb2_queue description in include/media/videobuf2-core.h
  1910. * for more information.
  1911. */
  1912. int vb2_core_queue_init(struct vb2_queue *q)
  1913. {
  1914. /*
  1915. * Sanity check
  1916. */
  1917. if (WARN_ON(!q) ||
  1918. WARN_ON(!q->ops) ||
  1919. WARN_ON(!q->mem_ops) ||
  1920. WARN_ON(!q->type) ||
  1921. WARN_ON(!q->io_modes) ||
  1922. WARN_ON(!q->ops->queue_setup) ||
  1923. WARN_ON(!q->ops->buf_queue))
  1924. return -EINVAL;
  1925. INIT_LIST_HEAD(&q->queued_list);
  1926. INIT_LIST_HEAD(&q->done_list);
  1927. spin_lock_init(&q->done_lock);
  1928. mutex_init(&q->mmap_lock);
  1929. init_waitqueue_head(&q->done_wq);
  1930. if (q->buf_struct_size == 0)
  1931. q->buf_struct_size = sizeof(struct vb2_buffer);
  1932. return 0;
  1933. }
  1934. EXPORT_SYMBOL_GPL(vb2_core_queue_init);
  1935. static int __vb2_init_fileio(struct vb2_queue *q, int read);
  1936. static int __vb2_cleanup_fileio(struct vb2_queue *q);
/**
 * vb2_core_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 *
 * Order matters: the file io emulator is torn down first (it streams off
 * and releases its own buffers), then any remaining streaming is cancelled,
 * and finally the buffers are freed under mmap_lock.
 */
void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	/* mmap_lock serializes freeing against concurrent mmap(). */
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
/**
 * vb2_core_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
 */
unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
			   poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/* Ignore poll requests for events this queue direction can't produce. */
	if (!q->is_output && !(req_events & (POLLIN | POLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (POLLOUT | POLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return POLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return POLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return POLLOUT | POLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return POLLIN | POLLRDNORM;

		poll_wait(file, &q->done_wq, wait);
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* Report readiness only for buffers in a dequeueable state. */
	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				POLLOUT | POLLWRNORM :
				POLLIN | POLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of the (single) plane */
	unsigned int size;	/* usable bytes in the buffer */
	unsigned int pos;	/* current read/write offset within the buffer */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};
/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;		/* buffer count passed to reqbufs */
	unsigned int type;		/* queue type used for reqbufs */
	unsigned int memory;		/* memory model (VB2_MEMORY_MMAP) */
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; /* per-buffer io state */
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;		/* total buffers queued so far */
	unsigned int dq_count;		/* total buffers dequeued so far */
	unsigned read_once:1;		/* stop after the first dequeued buffer */
	unsigned write_immediately:1;	/* queue after every write() chunk */
};
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates the emulator state, requests MMAP buffers from the driver,
 * maps them into kernel space, pre-queues them all in read mode, and
 * starts streaming. On any failure the buffers and state are released
 * again (err_reqbufs/err_kfree unwind).
 *
 * Returns 0 on success or a negative errno.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
			(!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			ret = vb2_core_qbuf(q, i, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	/* Freeing is done by requesting zero buffers. */
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}
  2189. /**
  2190. * __vb2_cleanup_fileio() - free resourced used by file io emulator
  2191. * @q: videobuf2 queue
  2192. */
  2193. static int __vb2_cleanup_fileio(struct vb2_queue *q)
  2194. {
  2195. struct vb2_fileio_data *fileio = q->fileio;
  2196. if (fileio) {
  2197. vb2_core_streamoff(q, q->type);
  2198. q->fileio = NULL;
  2199. fileio->count = 0;
  2200. vb2_core_reqbufs(q, fileio->memory, &fileio->count);
  2201. kfree(fileio);
  2202. dprintk(3, "file io emulator closed\n");
  2203. }
  2204. return 0;
  2205. }
/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointed to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means blocking calls, 0 means nonblocking)
 * @read:	access mode selector (1 means read, 0 means write)
 *
 * Returns the number of bytes transferred, or a negative errno.
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];
		b = q->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
				b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		/* Continue with the partially consumed current buffer. */
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		struct vb2_buffer *b = q->bufs[index];

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, index, NULL);
		dprintk(5, "vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that the next
		 * time we need to dequeue a buffer since we've now queued up
		 * all the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
/**
 * vb2_read() - implement read() file-io semantics on a vb2 queue.
 * @q:		videobuf2 queue to read from.
 * @data:	userspace destination buffer.
 * @count:	maximum number of bytes to read.
 * @ppos:	file position, advanced by the number of bytes consumed.
 * @nonblocking: if non-zero, do not sleep waiting for a filled buffer.
 *
 * Thin wrapper around __vb2_perform_fileio(); the trailing 1 selects
 * read (capture) direction. Returns the number of bytes read or a
 * negative error code.
 */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
/**
 * vb2_write() - implement write() file-io semantics on a vb2 queue.
 * @q:		videobuf2 queue to write to.
 * @data:	userspace source buffer.
 * @count:	maximum number of bytes to write.
 * @ppos:	file position, advanced by the number of bytes consumed.
 * @nonblocking: if non-zero, do not sleep waiting for a free buffer.
 *
 * Thin wrapper around __vb2_perform_fileio(); the trailing 0 selects
 * write (output) direction. The const is cast away because the helper
 * takes a single non-const pointer for both directions; the write path
 * only reads through it (copy_from_user). Returns the number of bytes
 * written or a negative error code.
 */
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
							ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
/*
 * Per-queue state for the vb2_thread kernel-thread file-io helper
 * (used by vb2_thread_start()/vb2_thread_stop()).
 */
struct vb2_threadio_data {
	struct task_struct *thread;	/* the vb2_thread kthread, NULL once stopped */
	vb2_thread_fnc fnc;		/* client callback invoked per dequeued buffer */
	void *priv;			/* opaque cookie passed to @fnc */
	bool stop;			/* set by vb2_thread_stop() to ask the thread to exit */
};
  2372. static int vb2_thread(void *data)
  2373. {
  2374. struct vb2_queue *q = data;
  2375. struct vb2_threadio_data *threadio = q->threadio;
  2376. bool copy_timestamp = false;
  2377. unsigned prequeue = 0;
  2378. unsigned index = 0;
  2379. int ret = 0;
  2380. if (q->is_output) {
  2381. prequeue = q->num_buffers;
  2382. copy_timestamp = q->copy_timestamp;
  2383. }
  2384. set_freezable();
  2385. for (;;) {
  2386. struct vb2_buffer *vb;
  2387. /*
  2388. * Call vb2_dqbuf to get buffer back.
  2389. */
  2390. if (prequeue) {
  2391. vb = q->bufs[index++];
  2392. prequeue--;
  2393. } else {
  2394. call_void_qop(q, wait_finish, q);
  2395. if (!threadio->stop)
  2396. ret = vb2_core_dqbuf(q, &index, NULL, 0);
  2397. call_void_qop(q, wait_prepare, q);
  2398. dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
  2399. if (!ret)
  2400. vb = q->bufs[index];
  2401. }
  2402. if (ret || threadio->stop)
  2403. break;
  2404. try_to_freeze();
  2405. if (vb->state != VB2_BUF_STATE_ERROR)
  2406. if (threadio->fnc(vb, threadio->priv))
  2407. break;
  2408. call_void_qop(q, wait_finish, q);
  2409. if (copy_timestamp)
  2410. vb->timestamp = ktime_get_ns();;
  2411. if (!threadio->stop)
  2412. ret = vb2_core_qbuf(q, vb->index, NULL);
  2413. call_void_qop(q, wait_prepare, q);
  2414. if (ret || threadio->stop)
  2415. break;
  2416. }
  2417. /* Hmm, linux becomes *very* unhappy without this ... */
  2418. while (!kthread_should_stop()) {
  2419. set_current_state(TASK_INTERRUPTIBLE);
  2420. schedule();
  2421. }
  2422. return 0;
  2423. }
  2424. /*
  2425. * This function should not be used for anything else but the videobuf2-dvb
  2426. * support. If you think you have another good use-case for this, then please
  2427. * contact the linux-media mailinglist first.
  2428. */
  2429. int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
  2430. const char *thread_name)
  2431. {
  2432. struct vb2_threadio_data *threadio;
  2433. int ret = 0;
  2434. if (q->threadio)
  2435. return -EBUSY;
  2436. if (vb2_is_busy(q))
  2437. return -EBUSY;
  2438. if (WARN_ON(q->fileio))
  2439. return -EBUSY;
  2440. threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
  2441. if (threadio == NULL)
  2442. return -ENOMEM;
  2443. threadio->fnc = fnc;
  2444. threadio->priv = priv;
  2445. ret = __vb2_init_fileio(q, !q->is_output);
  2446. dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
  2447. if (ret)
  2448. goto nomem;
  2449. q->threadio = threadio;
  2450. threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
  2451. if (IS_ERR(threadio->thread)) {
  2452. ret = PTR_ERR(threadio->thread);
  2453. threadio->thread = NULL;
  2454. goto nothread;
  2455. }
  2456. return 0;
  2457. nothread:
  2458. __vb2_cleanup_fileio(q);
  2459. nomem:
  2460. kfree(threadio);
  2461. return ret;
  2462. }
  2463. EXPORT_SYMBOL_GPL(vb2_thread_start);
  2464. int vb2_thread_stop(struct vb2_queue *q)
  2465. {
  2466. struct vb2_threadio_data *threadio = q->threadio;
  2467. int err;
  2468. if (threadio == NULL)
  2469. return 0;
  2470. threadio->stop = true;
  2471. /* Wake up all pending sleeps in the thread */
  2472. vb2_queue_error(q);
  2473. err = kthread_stop(threadio->thread);
  2474. __vb2_cleanup_fileio(q);
  2475. threadio->thread = NULL;
  2476. kfree(threadio);
  2477. q->threadio = NULL;
  2478. return err;
  2479. }
  2480. EXPORT_SYMBOL_GPL(vb2_thread_stop);
  2481. MODULE_DESCRIPTION("Media buffer core framework");
  2482. MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
  2483. MODULE_LICENSE("GPL");