videobuf2-core.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666
  1. /*
  2. * videobuf2-core.c - video buffer 2 core framework
  3. *
  4. * Copyright (C) 2010 Samsung Electronics
  5. *
  6. * Author: Pawel Osciak <pawel@osciak.com>
  7. * Marek Szyprowski <m.szyprowski@samsung.com>
  8. *
  9. * The vb2_thread implementation was based on code from videobuf-dvb.c:
  10. * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation.
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/err.h>
  18. #include <linux/kernel.h>
  19. #include <linux/module.h>
  20. #include <linux/mm.h>
  21. #include <linux/poll.h>
  22. #include <linux/slab.h>
  23. #include <linux/sched.h>
  24. #include <linux/freezer.h>
  25. #include <linux/kthread.h>
  26. #include <media/videobuf2-core.h>
  27. #include <media/v4l2-mc.h>
  28. #include <trace/events/vb2.h>
/*
 * Module-wide debug verbosity level; writable at runtime through
 * /sys/module/<module>/parameters/debug (mode 0644).
 */
static int debug;
module_param(debug, int, 0644);

/*
 * dprintk() - emit @fmt via pr_info() when the current debug level is at
 * least @level.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and is safe inside unbraced if/else bodies.
 */
#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("%s: " fmt, __func__, ## arg);		      \
	} while (0)
#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 *
 * The cnt_* fields these macros increment live in struct vb2_buffer /
 * struct vb2_queue and are dumped (and checked for balance) by
 * __vb2_queue_free().
 */

/* Trace a mem_ops call on @vb; "(nop)" marks an op the allocator lacks. */
#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

/* Call an int-returning mem_op; only successful calls are counted. */
#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

/* Call a pointer-returning mem_op; only non-error results are counted. */
#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

/* Call a void mem_op; counted unconditionally since it cannot fail. */
#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

/* Trace a queue-level vb2_ops call. */
#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

/* Call an int-returning queue op; only successful calls are counted. */
#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

/* Call a void queue op; counted unconditionally. */
#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

/* Trace a per-buffer vb2_ops call. */
#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->index, #op,			\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

/* Call an int-returning per-buffer op; only successful calls counted. */
#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

/* Call a void per-buffer op; counted unconditionally. */
#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/*
 * Non-debug variants: identical call semantics but without the logging
 * and without the balance counters.  A missing op evaluates to 0 (int),
 * NULL (pointer) or is silently skipped (void).
 */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
/*
 * Call an int-returning buf_ops callback if the queue provides one;
 * evaluates to 0 when @q, its buf_ops table, or the op itself is absent.
 */
#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

/* Same for void buf_ops callbacks: silently skipped when absent. */
#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})
  160. static void __vb2_queue_cancel(struct vb2_queue *q);
  161. static void __enqueue_in_driver(struct vb2_buffer *vb);
/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 *
 * Allocates one chunk per plane via the queue's mem_ops->alloc, sized to
 * the page-aligned plane length.  On success every plane's mem_priv holds
 * the allocator cookie and 0 is returned.  On failure all planes allocated
 * so far are released again and a negative errno is returned: -ENOMEM by
 * default, or the specific error if the allocator returned an ERR_PTR.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Page-align so each plane can be mmapped on its own. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Use the per-plane allocation device if set, else q->dev. */
		mem_priv = call_ptr_memop(vb, alloc,
				q->alloc_devs[plane] ? : q->dev,
				q->dma_attrs, size, q->dma_dir, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}
  197. /*
  198. * __vb2_buf_mem_free() - free memory of the given buffer
  199. */
  200. static void __vb2_buf_mem_free(struct vb2_buffer *vb)
  201. {
  202. unsigned int plane;
  203. for (plane = 0; plane < vb->num_planes; ++plane) {
  204. call_void_memop(vb, put, vb->planes[plane].mem_priv);
  205. vb->planes[plane].mem_priv = NULL;
  206. dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
  207. }
  208. }
  209. /*
  210. * __vb2_buf_userptr_put() - release userspace memory associated with
  211. * a USERPTR buffer
  212. */
  213. static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
  214. {
  215. unsigned int plane;
  216. for (plane = 0; plane < vb->num_planes; ++plane) {
  217. if (vb->planes[plane].mem_priv)
  218. call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
  219. vb->planes[plane].mem_priv = NULL;
  220. }
  221. }
  222. /*
  223. * __vb2_plane_dmabuf_put() - release memory associated with
  224. * a DMABUF shared plane
  225. */
  226. static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
  227. {
  228. if (!p->mem_priv)
  229. return;
  230. if (p->dbuf_mapped)
  231. call_void_memop(vb, unmap_dmabuf, p->mem_priv);
  232. call_void_memop(vb, detach_dmabuf, p->mem_priv);
  233. dma_buf_put(p->dbuf);
  234. p->mem_priv = NULL;
  235. p->dbuf = NULL;
  236. p->dbuf_mapped = 0;
  237. }
  238. /*
  239. * __vb2_buf_dmabuf_put() - release memory associated with
  240. * a DMABUF shared buffer
  241. */
  242. static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
  243. {
  244. unsigned int plane;
  245. for (plane = 0; plane < vb->num_planes; ++plane)
  246. __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
  247. }
  248. /*
  249. * __setup_offsets() - setup unique offsets ("cookies") for every plane in
  250. * the buffer.
  251. */
  252. static void __setup_offsets(struct vb2_buffer *vb)
  253. {
  254. struct vb2_queue *q = vb->vb2_queue;
  255. unsigned int plane;
  256. unsigned long off = 0;
  257. if (vb->index) {
  258. struct vb2_buffer *prev = q->bufs[vb->index - 1];
  259. struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
  260. off = PAGE_ALIGN(p->m.offset + p->length);
  261. }
  262. for (plane = 0; plane < vb->num_planes; ++plane) {
  263. vb->planes[plane].m.offset = off;
  264. dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
  265. vb->index, plane, off);
  266. off += vb->planes[plane].length;
  267. off = PAGE_ALIGN(off);
  268. }
  269. }
/*
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Appends the new buffers after the q->num_buffers already present; the
 * caller is responsible for updating q->num_buffers afterwards.  A failure
 * part-way through stops the loop, leaving the earlier buffers intact.
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q->num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		/* min_length records the driver-required minimum plane size */
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "failed allocating memory for buffer %d\n",
					buffer);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}
  336. /*
  337. * __vb2_free_mem() - release all video buffer memory for a given queue
  338. */
  339. static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
  340. {
  341. unsigned int buffer;
  342. struct vb2_buffer *vb;
  343. for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
  344. ++buffer) {
  345. vb = q->bufs[buffer];
  346. if (!vb)
  347. continue;
  348. /* Free MMAP buffers or release USERPTR buffers */
  349. if (q->memory == VB2_MEMORY_MMAP)
  350. __vb2_buf_mem_free(vb);
  351. else if (q->memory == VB2_MEMORY_DMABUF)
  352. __vb2_buf_dmabuf_put(vb);
  353. else
  354. __vb2_buf_userptr_put(vb);
  355. }
  356. }
/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 *
 * Returns 0 on success, or -EAGAIN if any buffer is still being prepared
 * (see the race note below).  Callers hold q->mmap_lock.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		/* Only cleanup buffers that actually got plane memory. */
		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		/* Reset the queue-level counters for the next streaming run. */
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("     buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		/* All buffers gone: return the queue to its pristine state. */
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
  463. bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
  464. {
  465. unsigned int plane;
  466. for (plane = 0; plane < vb->num_planes; ++plane) {
  467. void *mem_priv = vb->planes[plane].mem_priv;
  468. /*
  469. * If num_users() has not been provided, call_memop
  470. * will return 0, apparently nobody cares about this
  471. * case anyway. If num_users() returns more than 1,
  472. * we are not the only user of the plane's memory.
  473. */
  474. if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
  475. return true;
  476. }
  477. return false;
  478. }
  479. EXPORT_SYMBOL(vb2_buffer_in_use);
  480. /*
  481. * __buffers_in_use() - return true if any buffers on the queue are in use and
  482. * the queue cannot be freed (by the means of REQBUFS(0)) call
  483. */
  484. static bool __buffers_in_use(struct vb2_queue *q)
  485. {
  486. unsigned int buffer;
  487. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  488. if (vb2_buffer_in_use(q, q->bufs[buffer]))
  489. return true;
  490. }
  491. return false;
  492. }
  493. void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
  494. {
  495. call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
  496. }
  497. EXPORT_SYMBOL_GPL(vb2_core_querybuf);
  498. /*
  499. * __verify_userptr_ops() - verify that all memory operations required for
  500. * USERPTR queue type have been provided
  501. */
  502. static int __verify_userptr_ops(struct vb2_queue *q)
  503. {
  504. if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
  505. !q->mem_ops->put_userptr)
  506. return -EINVAL;
  507. return 0;
  508. }
  509. /*
  510. * __verify_mmap_ops() - verify that all memory operations required for
  511. * MMAP queue type have been provided
  512. */
  513. static int __verify_mmap_ops(struct vb2_queue *q)
  514. {
  515. if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
  516. !q->mem_ops->put || !q->mem_ops->mmap)
  517. return -EINVAL;
  518. return 0;
  519. }
  520. /*
  521. * __verify_dmabuf_ops() - verify that all memory operations required for
  522. * DMABUF queue type have been provided
  523. */
  524. static int __verify_dmabuf_ops(struct vb2_queue *q)
  525. {
  526. if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
  527. !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
  528. !q->mem_ops->unmap_dmabuf)
  529. return -EINVAL;
  530. return 0;
  531. }
  532. int vb2_verify_memory_type(struct vb2_queue *q,
  533. enum vb2_memory memory, unsigned int type)
  534. {
  535. if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
  536. memory != VB2_MEMORY_DMABUF) {
  537. dprintk(1, "unsupported memory type\n");
  538. return -EINVAL;
  539. }
  540. if (type != q->type) {
  541. dprintk(1, "requested type is incorrect\n");
  542. return -EINVAL;
  543. }
  544. /*
  545. * Make sure all the required memory ops for given memory type
  546. * are available.
  547. */
  548. if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
  549. dprintk(1, "MMAP for current setup unsupported\n");
  550. return -EINVAL;
  551. }
  552. if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
  553. dprintk(1, "USERPTR for current setup unsupported\n");
  554. return -EINVAL;
  555. }
  556. if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
  557. dprintk(1, "DMABUF for current setup unsupported\n");
  558. return -EINVAL;
  559. }
  560. /*
  561. * Place the busy tests at the end: -EBUSY can be ignored when
  562. * create_bufs is called with count == 0, but count == 0 should still
  563. * do the memory and type validation.
  564. */
  565. if (vb2_fileio_is_active(q)) {
  566. dprintk(1, "file io in progress\n");
  567. return -EBUSY;
  568. }
  569. return 0;
  570. }
  571. EXPORT_SYMBOL(vb2_verify_memory_type);
/*
 * vb2_core_reqbufs() - implement the REQBUFS ioctl semantics: free any
 * existing buffers when needed, then negotiate the buffer/plane layout with
 * the driver (queue_setup) and allocate.
 *
 * @memory: requested memory model for the new buffers.
 * @count:  in: number of buffers requested (0 means "free everything");
 *          out: number of buffers actually allocated.
 *
 * Returns 0 on success or a negative errno (-EBUSY while streaming or when
 * buffers are still in use, -ENOMEM when fewer than the required minimum
 * could be allocated, or whatever queue_setup() returned).
 */
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
			mutex_unlock(&q->mmap_lock);
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	q->memory = memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	/* A capture queue now waits for buffers to be queued by userspace. */
	q->waiting_for_buffers = !q->is_output;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
/*
 * vb2_core_create_bufs() - Allocate additional buffers on a queue that may
 * already have buffers allocated (the CREATE_BUFS backend).
 *
 * @q:			vb2 queue
 * @memory:		memory model for the new buffers; must match the model
 *			already in effect when the queue is non-empty
 * @count:		in: number of buffers requested,
 *			out: number of buffers actually allocated
 * @requested_planes:	planes per buffer requested by userspace, 0 to let
 *			the driver's queue_setup() decide
 * @requested_sizes:	per-plane sizes requested by userspace; only consulted
 *			when @requested_planes is non-zero
 *
 * Returns 0 on success (with *count updated), -ENOBUFS when the queue is
 * already at VB2_MAX_FRAME, -EBUSY/-EINVAL/-ENOMEM on other failures.
 */
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count, unsigned requested_planes,
		const unsigned requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	/* The queue cannot grow past the global per-queue buffer limit. */
	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		/*
		 * First allocation on this queue: it establishes the memory
		 * model and resets the per-plane allocator devices.
		 */
		if (q->waiting_in_dqbuf && *count) {
			dprintk(1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		q->memory = memory;
		q->waiting_for_buffers = !q->is_output;
	} else if (q->memory != memory) {
		/* Memory models cannot be mixed on one queue. */
		dprintk(1, "memory model mismatch\n");
		return -EINVAL;
	}

	/* Clamp the request so the total stays within VB2_MAX_FRAME. */
	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
				num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/* mmap_lock guards q->num_buffers against concurrent mmap lookups. */
	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
  767. void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
  768. {
  769. if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
  770. return NULL;
  771. return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
  772. }
  773. EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
  774. void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
  775. {
  776. if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
  777. return NULL;
  778. return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
  779. }
  780. EXPORT_SYMBOL_GPL(vb2_plane_cookie);
/*
 * vb2_buffer_done() - hand a buffer the driver finished processing back
 * to vb2.
 *
 * @vb:		the buffer, which must be in the ACTIVE state
 * @state:	final state: DONE, ERROR, QUEUED or REQUEUEING; any other
 *		value is coerced to ERROR (with a WARN)
 *
 * DONE/ERROR buffers are placed on the done_list and waiters in dqbuf are
 * woken; QUEUED/REQUEUEING return the buffer to vb2's queued state instead
 * (REQUEUEING additionally hands the buffer straight back to the driver
 * when streaming has been started). May be called from interrupt context:
 * done_lock is taken with irqsave.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	/* Coerce any unexpected state to ERROR. */
	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED &&
		    state != VB2_BUF_STATE_REQUEUEING))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "done processing on buffer %d, state: %d\n",
			vb->index, state);

	if (state != VB2_BUF_STATE_QUEUED &&
	    state != VB2_BUF_STATE_REQUEUEING) {
		/* sync buffers */
		for (plane = 0; plane < vb->num_planes; ++plane)
			call_void_memop(vb, finish, vb->planes[plane].mem_priv);
	}

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED ||
	    state == VB2_BUF_STATE_REQUEUEING) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	case VB2_BUF_STATE_REQUEUEING:
		/* Give the buffer straight back to the driver. */
		if (q->start_streaming_called)
			__enqueue_in_driver(vb);
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
  834. void vb2_discard_done(struct vb2_queue *q)
  835. {
  836. struct vb2_buffer *vb;
  837. unsigned long flags;
  838. spin_lock_irqsave(&q->done_lock, flags);
  839. list_for_each_entry(vb, &q->done_list, done_entry)
  840. vb->state = VB2_BUF_STATE_ERROR;
  841. spin_unlock_irqrestore(&q->done_lock, flags);
  842. }
  843. EXPORT_SYMBOL_GPL(vb2_discard_done);
  844. /*
  845. * __prepare_mmap() - prepare an MMAP buffer
  846. */
  847. static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
  848. {
  849. int ret = 0;
  850. if (pb)
  851. ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
  852. vb, pb, vb->planes);
  853. return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
  854. }
/*
 * __prepare_userptr() - prepare a USERPTR buffer
 *
 * Validates the userspace pointers/lengths supplied via @pb, re-acquires
 * the underlying memory for any plane whose address or length changed,
 * then runs the driver's buf_init (only when planes were re-acquired) and
 * buf_prepare ops. On error every already-acquired plane is released again
 * before returning.
 */
static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	/* No memory attached yet means every plane must be (re)acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb) {
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
		if (ret)
			return ret;
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
						planes[plane].length,
						vb->planes[plane].min_length,
						plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				/*
				 * The driver's per-buffer state is about to
				 * be invalidated; buf_cleanup is called once,
				 * before the first plane is dropped.
				 */
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		/* Reset the plane before attempting the new acquisition. */
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr,
				q->alloc_devs[plane] ? : q->dev,
				planes[plane].m.userptr,
				planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}
/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 *
 * Looks up the dma-buf fd of every plane, validates its size against the
 * plane's minimum length, attaches and maps any plane whose dma-buf or
 * length changed, then runs the driver's buf_init (only when planes were
 * re-acquired) and buf_prepare ops. On error, all attachments made so far
 * are dropped via __vb2_buf_dmabuf_put().
 */
static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	/* No memory attached yet means every plane must be (re)acquired. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb) {
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
		if (ret)
			return ret;
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Takes a reference that must be balanced with dma_buf_put(). */
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->planes[plane].length == planes[plane].length) {
			/* Drop the extra reference taken above. */
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(3, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			/* buf_cleanup runs once, before the first plane is dropped. */
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf,
				q->alloc_devs[plane] ? : q->dev,
				dbuf, planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		/* The plane now owns the dma_buf_get() reference. */
		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * This pins the buffer(s) with dma_buf_map_attachment()). It's done
	 * here instead just before the DMA, while queueing the buffer(s) so
	 * userspace knows sooner rather than later if the dma-buf map fails.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
  1073. /*
  1074. * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
  1075. */
  1076. static void __enqueue_in_driver(struct vb2_buffer *vb)
  1077. {
  1078. struct vb2_queue *q = vb->vb2_queue;
  1079. vb->state = VB2_BUF_STATE_ACTIVE;
  1080. atomic_inc(&q->owned_by_drv_count);
  1081. trace_vb2_buf_queue(q, vb);
  1082. call_void_vb_qop(vb, buf_queue, vb);
  1083. }
/*
 * __buf_prepare() - validate and pin down a buffer's memory according to
 * the queue's memory model and move it to the PREPARED state.
 *
 * The buffer is held in the transient PREPARING state while the
 * memory-model-specific helper runs; on failure it is put back to
 * DEQUEUED. Returns 0 on success or a negative errno.
 */
static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	int ret;

	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb, pb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb, pb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb, pb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret) {
		dprintk(1, "buffer preparation failed: %d\n", ret);
		vb->state = VB2_BUF_STATE_DEQUEUED;
		return ret;
	}

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);

	vb->state = VB2_BUF_STATE_PREPARED;

	return 0;
}
  1119. int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
  1120. {
  1121. struct vb2_buffer *vb;
  1122. int ret;
  1123. vb = q->bufs[index];
  1124. if (vb->state != VB2_BUF_STATE_DEQUEUED) {
  1125. dprintk(1, "invalid buffer state %d\n",
  1126. vb->state);
  1127. return -EINVAL;
  1128. }
  1129. ret = __buf_prepare(vb, pb);
  1130. if (ret)
  1131. return ret;
  1132. /* Fill buffer information for the userspace */
  1133. call_void_bufop(q, fill_user_buffer, vb, pb);
  1134. dprintk(2, "prepare of buffer %d succeeded\n", vb->index);
  1135. return ret;
  1136. }
  1137. EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	/* The driver refused: roll back the flag set optimistically above. */
	q->start_streaming_called = 0;

	dprintk(1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information how buffers
	 * should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
/*
 * vb2_core_qbuf() - Queue buffer @index for the driver (the QBUF backend).
 * @q:		videobuf2 queue
 * @index:	id number of the buffer
 * @pb:		buffer structure passed from userspace to the v4l2/other
 *		layer; may be NULL (e.g. for in-kernel file I/O emulation)
 *
 * A DEQUEUED buffer is prepared first; a PREPARED buffer is queued as-is.
 * The buffer is handed to the driver immediately if streaming has already
 * started, and start_streaming() is triggered once enough buffers have
 * been queued. Returns 0 on success or a negative errno.
 */
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb = q->bufs[index];

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, pb);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "invalid buffer state %d\n", vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * Returns 0 when done_list is non-empty, or a negative errno: -EBUSY if
 * another dup()ped fd is already waiting, -EINVAL when not streaming,
 * -EIO on a queue error, -EPIPE once the last buffer was dequeued,
 * -EAGAIN for a nonblocking caller with nothing ready, or the value of
 * an interrupted sleep.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		/* Only one waiter at a time is allowed to sleep here. */
		if (q->waiting_in_dqbuf) {
			dprintk(1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}

		if (!q->streaming) {
			dprintk(1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n");
			return -EAGAIN;
		}

		q->waiting_in_dqbuf = 1;
		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		q->waiting_in_dqbuf = 0;
		if (ret) {
			dprintk(1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * On success *vb points to the first buffer of the done list, which has
 * been removed from that list. If verify_planes_array fails, the buffer
 * is left on the done list and the error is returned.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     void *pb, int nonblocking)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);

	/*
	 * Only remove the buffer from done_list if all planes can be
	 * handled. Some cases such as V4L2 file I/O and DVB have pb
	 * == NULL; skip the check then as there's nothing to verify.
	 */
	if (pb)
		ret = call_bufop(q, verify_planes_array, *vb, pb);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
  1360. int vb2_wait_for_all_buffers(struct vb2_queue *q)
  1361. {
  1362. if (!q->streaming) {
  1363. dprintk(1, "streaming off, will not wait for buffers\n");
  1364. return -EINVAL;
  1365. }
  1366. if (q->start_streaming_called)
  1367. wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
  1368. return 0;
  1369. }
  1370. EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
/*
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 *
 * For DMABUF queues this also unmaps any still-mapped planes, balancing
 * the map_dmabuf done at prepare time.
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int i;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	/* unmap DMABUF buffer */
	if (q->memory == VB2_MEMORY_DMABUF)
		for (i = 0; i < vb->num_planes; ++i) {
			if (!vb->planes[i].dbuf_mapped)
				continue;
			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
			vb->planes[i].dbuf_mapped = 0;
		}
}
/*
 * vb2_core_dqbuf() - Dequeue a buffer to userspace (the DQBUF backend).
 * @q:		videobuf2 queue
 * @pindex:	when non-NULL, set to the index of the dequeued buffer
 * @pb:		buffer structure passed from userspace to the v4l2/other
 *		layer; may be NULL (e.g. for in-kernel file I/O emulation)
 * @nonblocking: when true, return -EAGAIN instead of sleeping if no
 *		buffer is ready
 *
 * Only buffers in the DONE or ERROR state can be dequeued. Returns 0 on
 * success or a negative errno.
 */
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "invalid buffer state\n");
		return -EINVAL;
	}

	/* buf_finish must run before fill_user_buffer below. */
	call_void_vb_qop(vb, buf_finish, vb);

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(2, "dqbuf of buffer %d, with state %d\n",
			vb->index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information how buffers should be returned
	 * to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
					q->bufs[i]);
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
			}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;	/* a queue error condition is cleared by cancelling */

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state == VB2_BUF_STATE_PREPARED ||
		    vb->state == VB2_BUF_STATE_QUEUED) {
			unsigned int plane;

			/* Balance the memop 'prepare' done in __buf_prepare(). */
			for (plane = 0; plane < vb->num_planes; ++plane)
				call_void_memop(vb, finish,
						vb->planes[plane].mem_priv);
		}

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}
/*
 * vb2_core_streamon() - Implement the STREAMON backend: start streaming
 * on queue @q of stream type @type.
 *
 * The driver's start_streaming op is only invoked once at least
 * q->min_buffers_needed buffers have been queued; otherwise it is
 * deferred until vb2_core_qbuf() reaches that threshold. Calling this on
 * an already-streaming queue is a successful no-op.
 */
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	int ret;

	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(1, "need at least %u allocated buffers\n",
				q->min_buffers_needed);
		return -EINVAL;
	}

	/*
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = v4l_vb2q_enable_media_source(q);
		if (ret)
			return ret;
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	q->streaming = 1;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
/*
 * vb2_queue_error() - put the queue into an unrecoverable error state.
 *
 * Sets q->error and wakes up everybody sleeping on the done queue so
 * that waiters notice the error; vb2_core_poll() reports EPOLLERR while
 * this flag is set. The flag stays set until the queue is reinitialized.
 */
void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
/*
 * vb2_core_streamoff() - stop streaming and return all buffers to the
 * dequeued state. Returns 0 on success or -EINVAL on a type mismatch.
 */
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	/* Only capture queues go back to waiting for buffers to be queued. */
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);
  1563. /*
  1564. * __find_plane_by_offset() - find plane associated with the given offset off
  1565. */
  1566. static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
  1567. unsigned int *_buffer, unsigned int *_plane)
  1568. {
  1569. struct vb2_buffer *vb;
  1570. unsigned int buffer, plane;
  1571. /*
  1572. * Go over all buffers and their planes, comparing the given offset
  1573. * with an offset assigned to each plane. If a match is found,
  1574. * return its buffer and plane numbers.
  1575. */
  1576. for (buffer = 0; buffer < q->num_buffers; ++buffer) {
  1577. vb = q->bufs[buffer];
  1578. for (plane = 0; plane < vb->num_planes; ++plane) {
  1579. if (vb->planes[plane].m.offset == off) {
  1580. *_buffer = buffer;
  1581. *_plane = plane;
  1582. return 0;
  1583. }
  1584. }
  1585. }
  1586. return -EINVAL;
  1587. }
  1588. int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
  1589. unsigned int index, unsigned int plane, unsigned int flags)
  1590. {
  1591. struct vb2_buffer *vb = NULL;
  1592. struct vb2_plane *vb_plane;
  1593. int ret;
  1594. struct dma_buf *dbuf;
  1595. if (q->memory != VB2_MEMORY_MMAP) {
  1596. dprintk(1, "queue is not currently set up for mmap\n");
  1597. return -EINVAL;
  1598. }
  1599. if (!q->mem_ops->get_dmabuf) {
  1600. dprintk(1, "queue does not support DMA buffer exporting\n");
  1601. return -EINVAL;
  1602. }
  1603. if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
  1604. dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
  1605. return -EINVAL;
  1606. }
  1607. if (type != q->type) {
  1608. dprintk(1, "invalid buffer type\n");
  1609. return -EINVAL;
  1610. }
  1611. if (index >= q->num_buffers) {
  1612. dprintk(1, "buffer index out of range\n");
  1613. return -EINVAL;
  1614. }
  1615. vb = q->bufs[index];
  1616. if (plane >= vb->num_planes) {
  1617. dprintk(1, "buffer plane out of range\n");
  1618. return -EINVAL;
  1619. }
  1620. if (vb2_fileio_is_active(q)) {
  1621. dprintk(1, "expbuf: file io in progress\n");
  1622. return -EBUSY;
  1623. }
  1624. vb_plane = &vb->planes[plane];
  1625. dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
  1626. flags & O_ACCMODE);
  1627. if (IS_ERR_OR_NULL(dbuf)) {
  1628. dprintk(1, "failed to export buffer %d, plane %d\n",
  1629. index, plane);
  1630. return -EINVAL;
  1631. }
  1632. ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
  1633. if (ret < 0) {
  1634. dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
  1635. index, plane, ret);
  1636. dma_buf_put(dbuf);
  1637. return ret;
  1638. }
  1639. dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
  1640. index, plane, ret);
  1641. *fd = ret;
  1642. return 0;
  1643. }
  1644. EXPORT_SYMBOL_GPL(vb2_core_expbuf);
  1645. int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
  1646. {
  1647. unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
  1648. struct vb2_buffer *vb;
  1649. unsigned int buffer = 0, plane = 0;
  1650. int ret;
  1651. unsigned long length;
  1652. if (q->memory != VB2_MEMORY_MMAP) {
  1653. dprintk(1, "queue is not currently set up for mmap\n");
  1654. return -EINVAL;
  1655. }
  1656. /*
  1657. * Check memory area access mode.
  1658. */
  1659. if (!(vma->vm_flags & VM_SHARED)) {
  1660. dprintk(1, "invalid vma flags, VM_SHARED needed\n");
  1661. return -EINVAL;
  1662. }
  1663. if (q->is_output) {
  1664. if (!(vma->vm_flags & VM_WRITE)) {
  1665. dprintk(1, "invalid vma flags, VM_WRITE needed\n");
  1666. return -EINVAL;
  1667. }
  1668. } else {
  1669. if (!(vma->vm_flags & VM_READ)) {
  1670. dprintk(1, "invalid vma flags, VM_READ needed\n");
  1671. return -EINVAL;
  1672. }
  1673. }
  1674. mutex_lock(&q->mmap_lock);
  1675. if (vb2_fileio_is_active(q)) {
  1676. dprintk(1, "mmap: file io in progress\n");
  1677. ret = -EBUSY;
  1678. goto unlock;
  1679. }
  1680. /*
  1681. * Find the plane corresponding to the offset passed by userspace.
  1682. */
  1683. ret = __find_plane_by_offset(q, off, &buffer, &plane);
  1684. if (ret)
  1685. goto unlock;
  1686. vb = q->bufs[buffer];
  1687. /*
  1688. * MMAP requires page_aligned buffers.
  1689. * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
  1690. * so, we need to do the same here.
  1691. */
  1692. length = PAGE_ALIGN(vb->planes[plane].length);
  1693. if (length < (vma->vm_end - vma->vm_start)) {
  1694. dprintk(1,
  1695. "MMAP invalid, as it would overflow buffer length\n");
  1696. ret = -EINVAL;
  1697. goto unlock;
  1698. }
  1699. ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
  1700. unlock:
  1701. mutex_unlock(&q->mmap_lock);
  1702. if (ret)
  1703. return ret;
  1704. dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
  1705. return 0;
  1706. }
  1707. EXPORT_SYMBOL_GPL(vb2_mmap);
  1708. #ifndef CONFIG_MMU
  1709. unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
  1710. unsigned long addr,
  1711. unsigned long len,
  1712. unsigned long pgoff,
  1713. unsigned long flags)
  1714. {
  1715. unsigned long off = pgoff << PAGE_SHIFT;
  1716. struct vb2_buffer *vb;
  1717. unsigned int buffer, plane;
  1718. void *vaddr;
  1719. int ret;
  1720. if (q->memory != VB2_MEMORY_MMAP) {
  1721. dprintk(1, "queue is not currently set up for mmap\n");
  1722. return -EINVAL;
  1723. }
  1724. /*
  1725. * Find the plane corresponding to the offset passed by userspace.
  1726. */
  1727. ret = __find_plane_by_offset(q, off, &buffer, &plane);
  1728. if (ret)
  1729. return ret;
  1730. vb = q->bufs[buffer];
  1731. vaddr = vb2_plane_vaddr(vb, plane);
  1732. return vaddr ? (unsigned long)vaddr : -EINVAL;
  1733. }
  1734. EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
  1735. #endif
  1736. int vb2_core_queue_init(struct vb2_queue *q)
  1737. {
  1738. /*
  1739. * Sanity check
  1740. */
  1741. if (WARN_ON(!q) ||
  1742. WARN_ON(!q->ops) ||
  1743. WARN_ON(!q->mem_ops) ||
  1744. WARN_ON(!q->type) ||
  1745. WARN_ON(!q->io_modes) ||
  1746. WARN_ON(!q->ops->queue_setup) ||
  1747. WARN_ON(!q->ops->buf_queue))
  1748. return -EINVAL;
  1749. INIT_LIST_HEAD(&q->queued_list);
  1750. INIT_LIST_HEAD(&q->done_list);
  1751. spin_lock_init(&q->done_lock);
  1752. mutex_init(&q->mmap_lock);
  1753. init_waitqueue_head(&q->done_wq);
  1754. q->memory = VB2_MEMORY_UNKNOWN;
  1755. if (q->buf_struct_size == 0)
  1756. q->buf_struct_size = sizeof(struct vb2_buffer);
  1757. if (q->bidirectional)
  1758. q->dma_dir = DMA_BIDIRECTIONAL;
  1759. else
  1760. q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
  1761. return 0;
  1762. }
  1763. EXPORT_SYMBOL_GPL(vb2_core_queue_init);
  1764. static int __vb2_init_fileio(struct vb2_queue *q, int read);
  1765. static int __vb2_cleanup_fileio(struct vb2_queue *q);
/*
 * vb2_core_queue_release() - final queue teardown: stop the file I/O
 * emulator, cancel streaming and free all buffers.
 */
void vb2_core_queue_release(struct vb2_queue *q)
{
	/* Shut down the read()/write() emulator first, if it was active. */
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	/* mmap_lock serializes buffer freeing against vb2_mmap(). */
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, q->num_buffers);
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
/*
 * vb2_core_poll() - poll/select support for a vb2 queue.
 *
 * Returns EPOLLIN/EPOLLRDNORM (capture) or EPOLLOUT/EPOLLWRNORM (output)
 * when a buffer can be dequeued, EPOLLERR on queue error or when not
 * streaming, and 0 when the caller must wait on q->done_wq.
 */
__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
		       poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/* Nothing to report if the caller isn't polling our direction. */
	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
		return 0;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
				(req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
				(req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return EPOLLOUT | EPOLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return EPOLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
		return EPOLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
		return EPOLLOUT | EPOLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return EPOLLIN | EPOLLRDNORM;

		poll_wait(file, &q->done_wq, wait);
	}

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				EPOLLOUT | EPOLLWRNORM :
				EPOLLIN | EPOLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);
/*
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 *
 * @vaddr:	kernel virtual address of plane 0, obtained via
 *		vb2_plane_vaddr() in __vb2_init_fileio().
 * @size:	usable size of the buffer in bytes (plane size, or the
 *		payload reported by the driver after a dequeue on read).
 * @pos:	current read/write byte offset within the buffer.
 * @queued:	set while the buffer is queued to the driver.
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};
/*
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @count:	number of buffers requested via vb2_core_reqbufs() (the
 *		driver may raise it in queue_setup()).
 * @type:	buffer type, copied from q->type.
 * @memory:	always VB2_MEMORY_MMAP so the driver allocates the buffers.
 * @bufs:	per-buffer emulator state.
 * @cur_index: the index of the buffer currently being read from or
 * written to. If equal to q->num_buffers then a new buffer
 * must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 * buffers. However, in the write() case no buffers are initially
 * queued, instead whenever a buffer is full it is queued up by
 * __vb2_perform_fileio(). Only once all available buffers have
 * been queued up will __vb2_perform_fileio() start to dequeue
 * buffers. This means that initially __vb2_perform_fileio()
 * needs to know what buffer index to use when it is queuing up
 * the buffers for the first time. That initial index is stored
 * in this field. Once it is equal to q->num_buffers all
 * available buffers have been queued and __vb2_perform_fileio()
 * should start the normal dequeue/queue cycle.
 * @q_count:	running count of buffers queued by the emulator.
 * @dq_count:	running count of buffers dequeued by the emulator; used
 *		to detect the first dequeue in read_once mode.
 * @read_once:	copy of q->fileio_read_once; tear the emulator down after
 *		the first dequeued buffer has been consumed.
 * @write_immediately: copy of q->fileio_write_immediately; queue a buffer
 *		after every write() instead of waiting until it is full.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};
  1900. /*
  1901. * __vb2_init_fileio() - initialize file io emulator
  1902. * @q: videobuf2 queue
  1903. * @read: mode selector (1 means read, 0 means write)
  1904. */
  1905. static int __vb2_init_fileio(struct vb2_queue *q, int read)
  1906. {
  1907. struct vb2_fileio_data *fileio;
  1908. int i, ret;
  1909. unsigned int count = 0;
  1910. /*
  1911. * Sanity check
  1912. */
  1913. if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
  1914. (!read && !(q->io_modes & VB2_WRITE))))
  1915. return -EINVAL;
  1916. /*
  1917. * Check if device supports mapping buffers to kernel virtual space.
  1918. */
  1919. if (!q->mem_ops->vaddr)
  1920. return -EBUSY;
  1921. /*
  1922. * Check if streaming api has not been already activated.
  1923. */
  1924. if (q->streaming || q->num_buffers > 0)
  1925. return -EBUSY;
  1926. /*
  1927. * Start with count 1, driver can increase it in queue_setup()
  1928. */
  1929. count = 1;
  1930. dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
  1931. (read) ? "read" : "write", count, q->fileio_read_once,
  1932. q->fileio_write_immediately);
  1933. fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
  1934. if (fileio == NULL)
  1935. return -ENOMEM;
  1936. fileio->read_once = q->fileio_read_once;
  1937. fileio->write_immediately = q->fileio_write_immediately;
  1938. /*
  1939. * Request buffers and use MMAP type to force driver
  1940. * to allocate buffers by itself.
  1941. */
  1942. fileio->count = count;
  1943. fileio->memory = VB2_MEMORY_MMAP;
  1944. fileio->type = q->type;
  1945. q->fileio = fileio;
  1946. ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
  1947. if (ret)
  1948. goto err_kfree;
  1949. /*
  1950. * Check if plane_count is correct
  1951. * (multiplane buffers are not supported).
  1952. */
  1953. if (q->bufs[0]->num_planes != 1) {
  1954. ret = -EBUSY;
  1955. goto err_reqbufs;
  1956. }
  1957. /*
  1958. * Get kernel address of each buffer.
  1959. */
  1960. for (i = 0; i < q->num_buffers; i++) {
  1961. fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
  1962. if (fileio->bufs[i].vaddr == NULL) {
  1963. ret = -EINVAL;
  1964. goto err_reqbufs;
  1965. }
  1966. fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
  1967. }
  1968. /*
  1969. * Read mode requires pre queuing of all buffers.
  1970. */
  1971. if (read) {
  1972. /*
  1973. * Queue all buffers.
  1974. */
  1975. for (i = 0; i < q->num_buffers; i++) {
  1976. ret = vb2_core_qbuf(q, i, NULL);
  1977. if (ret)
  1978. goto err_reqbufs;
  1979. fileio->bufs[i].queued = 1;
  1980. }
  1981. /*
  1982. * All buffers have been queued, so mark that by setting
  1983. * initial_index to q->num_buffers
  1984. */
  1985. fileio->initial_index = q->num_buffers;
  1986. fileio->cur_index = q->num_buffers;
  1987. }
  1988. /*
  1989. * Start streaming.
  1990. */
  1991. ret = vb2_core_streamon(q, q->type);
  1992. if (ret)
  1993. goto err_reqbufs;
  1994. return ret;
  1995. err_reqbufs:
  1996. fileio->count = 0;
  1997. vb2_core_reqbufs(q, fileio->memory, &fileio->count);
  1998. err_kfree:
  1999. q->fileio = NULL;
  2000. kfree(fileio);
  2001. return ret;
  2002. }
  2003. /*
  2004. * __vb2_cleanup_fileio() - free resourced used by file io emulator
  2005. * @q: videobuf2 queue
  2006. */
  2007. static int __vb2_cleanup_fileio(struct vb2_queue *q)
  2008. {
  2009. struct vb2_fileio_data *fileio = q->fileio;
  2010. if (fileio) {
  2011. vb2_core_streamoff(q, q->type);
  2012. q->fileio = NULL;
  2013. fileio->count = 0;
  2014. vb2_core_reqbufs(q, fileio->memory, &fileio->count);
  2015. kfree(fileio);
  2016. dprintk(3, "file io emulator closed\n");
  2017. }
  2018. return 0;
  2019. }
  2020. /*
  2021. * __vb2_perform_fileio() - perform a single file io (read or write) operation
  2022. * @q: videobuf2 queue
  2023. * @data: pointed to target userspace buffer
  2024. * @count: number of bytes to read or write
  2025. * @ppos: file handle position tracking pointer
  2026. * @nonblock: mode selector (1 means blocking calls, 0 means nonblocking)
  2027. * @read: access mode selector (1 means read, 0 means write)
  2028. */
  2029. static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
  2030. loff_t *ppos, int nonblock, int read)
  2031. {
  2032. struct vb2_fileio_data *fileio;
  2033. struct vb2_fileio_buf *buf;
  2034. bool is_multiplanar = q->is_multiplanar;
  2035. /*
  2036. * When using write() to write data to an output video node the vb2 core
  2037. * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
  2038. * else is able to provide this information with the write() operation.
  2039. */
  2040. bool copy_timestamp = !read && q->copy_timestamp;
  2041. unsigned index;
  2042. int ret;
  2043. dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
  2044. read ? "read" : "write", (long)*ppos, count,
  2045. nonblock ? "non" : "");
  2046. if (!data)
  2047. return -EINVAL;
  2048. if (q->waiting_in_dqbuf) {
  2049. dprintk(3, "another dup()ped fd is %s\n",
  2050. read ? "reading" : "writing");
  2051. return -EBUSY;
  2052. }
  2053. /*
  2054. * Initialize emulator on first call.
  2055. */
  2056. if (!vb2_fileio_is_active(q)) {
  2057. ret = __vb2_init_fileio(q, read);
  2058. dprintk(3, "vb2_init_fileio result: %d\n", ret);
  2059. if (ret)
  2060. return ret;
  2061. }
  2062. fileio = q->fileio;
  2063. /*
  2064. * Check if we need to dequeue the buffer.
  2065. */
  2066. index = fileio->cur_index;
  2067. if (index >= q->num_buffers) {
  2068. struct vb2_buffer *b;
  2069. /*
  2070. * Call vb2_dqbuf to get buffer back.
  2071. */
  2072. ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
  2073. dprintk(5, "vb2_dqbuf result: %d\n", ret);
  2074. if (ret)
  2075. return ret;
  2076. fileio->dq_count += 1;
  2077. fileio->cur_index = index;
  2078. buf = &fileio->bufs[index];
  2079. b = q->bufs[index];
  2080. /*
  2081. * Get number of bytes filled by the driver
  2082. */
  2083. buf->pos = 0;
  2084. buf->queued = 0;
  2085. buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
  2086. : vb2_plane_size(q->bufs[index], 0);
  2087. /* Compensate for data_offset on read in the multiplanar case. */
  2088. if (is_multiplanar && read &&
  2089. b->planes[0].data_offset < buf->size) {
  2090. buf->pos = b->planes[0].data_offset;
  2091. buf->size -= buf->pos;
  2092. }
  2093. } else {
  2094. buf = &fileio->bufs[index];
  2095. }
  2096. /*
  2097. * Limit count on last few bytes of the buffer.
  2098. */
  2099. if (buf->pos + count > buf->size) {
  2100. count = buf->size - buf->pos;
  2101. dprintk(5, "reducing read count: %zd\n", count);
  2102. }
  2103. /*
  2104. * Transfer data to userspace.
  2105. */
  2106. dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
  2107. count, index, buf->pos);
  2108. if (read)
  2109. ret = copy_to_user(data, buf->vaddr + buf->pos, count);
  2110. else
  2111. ret = copy_from_user(buf->vaddr + buf->pos, data, count);
  2112. if (ret) {
  2113. dprintk(3, "error copying data\n");
  2114. return -EFAULT;
  2115. }
  2116. /*
  2117. * Update counters.
  2118. */
  2119. buf->pos += count;
  2120. *ppos += count;
  2121. /*
  2122. * Queue next buffer if required.
  2123. */
  2124. if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
  2125. struct vb2_buffer *b = q->bufs[index];
  2126. /*
  2127. * Check if this is the last buffer to read.
  2128. */
  2129. if (read && fileio->read_once && fileio->dq_count == 1) {
  2130. dprintk(3, "read limit reached\n");
  2131. return __vb2_cleanup_fileio(q);
  2132. }
  2133. /*
  2134. * Call vb2_qbuf and give buffer to the driver.
  2135. */
  2136. b->planes[0].bytesused = buf->pos;
  2137. if (copy_timestamp)
  2138. b->timestamp = ktime_get_ns();
  2139. ret = vb2_core_qbuf(q, index, NULL);
  2140. dprintk(5, "vb2_dbuf result: %d\n", ret);
  2141. if (ret)
  2142. return ret;
  2143. /*
  2144. * Buffer has been queued, update the status
  2145. */
  2146. buf->pos = 0;
  2147. buf->queued = 1;
  2148. buf->size = vb2_plane_size(q->bufs[index], 0);
  2149. fileio->q_count += 1;
  2150. /*
  2151. * If we are queuing up buffers for the first time, then
  2152. * increase initial_index by one.
  2153. */
  2154. if (fileio->initial_index < q->num_buffers)
  2155. fileio->initial_index++;
  2156. /*
  2157. * The next buffer to use is either a buffer that's going to be
  2158. * queued for the first time (initial_index < q->num_buffers)
  2159. * or it is equal to q->num_buffers, meaning that the next
  2160. * time we need to dequeue a buffer since we've now queued up
  2161. * all the 'first time' buffers.
  2162. */
  2163. fileio->cur_index = fileio->initial_index;
  2164. }
  2165. /*
  2166. * Return proper number of bytes processed.
  2167. */
  2168. if (ret == 0)
  2169. ret = count;
  2170. return ret;
  2171. }
/* read() emulation: a thin wrapper selecting the read mode (read=1). */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
/* write() emulation: a thin wrapper selecting the write mode (read=0). */
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	/*
	 * The const qualifier is dropped for the shared helper; the write
	 * path only copies *from* @data (copy_from_user).
	 */
	return __vb2_perform_fileio(q, (char __user *) data, count,
							ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
/*
 * struct vb2_threadio_data - state of the vb2_thread buffer-pump kthread
 *
 * @thread:	the kthread created by vb2_thread_start().
 * @fnc:	callback invoked for every dequeued buffer.
 * @priv:	opaque pointer handed to @fnc.
 * @stop:	set by vb2_thread_stop() to ask the thread loop to exit.
 */
struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};
/*
 * vb2_thread() - kthread body: dequeue a buffer, pass it to the client
 * callback, then queue it back to the driver, until asked to stop.
 */
static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	bool copy_timestamp = false;
	unsigned prequeue = 0;
	unsigned index = 0;
	int ret = 0;

	if (q->is_output) {
		/* Output: feed all pre-allocated buffers to @fnc first. */
		prequeue = q->num_buffers;
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		if (prequeue) {
			vb = q->bufs[index++];
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			/* Skip the (blocking) dqbuf once a stop was requested. */
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, &index, NULL, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
			if (!ret)
				vb = q->bufs[index];
		}
		/* vb is only valid here when ret == 0 and we are not stopping. */
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		if (vb->state != VB2_BUF_STATE_ERROR)
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			vb->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, vb->index, NULL);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Park here until kthread_stop() is actually called. */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
  2243. /*
  2244. * This function should not be used for anything else but the videobuf2-dvb
  2245. * support. If you think you have another good use-case for this, then please
  2246. * contact the linux-media mailinglist first.
  2247. */
  2248. int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
  2249. const char *thread_name)
  2250. {
  2251. struct vb2_threadio_data *threadio;
  2252. int ret = 0;
  2253. if (q->threadio)
  2254. return -EBUSY;
  2255. if (vb2_is_busy(q))
  2256. return -EBUSY;
  2257. if (WARN_ON(q->fileio))
  2258. return -EBUSY;
  2259. threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
  2260. if (threadio == NULL)
  2261. return -ENOMEM;
  2262. threadio->fnc = fnc;
  2263. threadio->priv = priv;
  2264. ret = __vb2_init_fileio(q, !q->is_output);
  2265. dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
  2266. if (ret)
  2267. goto nomem;
  2268. q->threadio = threadio;
  2269. threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
  2270. if (IS_ERR(threadio->thread)) {
  2271. ret = PTR_ERR(threadio->thread);
  2272. threadio->thread = NULL;
  2273. goto nothread;
  2274. }
  2275. return 0;
  2276. nothread:
  2277. __vb2_cleanup_fileio(q);
  2278. nomem:
  2279. kfree(threadio);
  2280. return ret;
  2281. }
  2282. EXPORT_SYMBOL_GPL(vb2_thread_start);
  2283. int vb2_thread_stop(struct vb2_queue *q)
  2284. {
  2285. struct vb2_threadio_data *threadio = q->threadio;
  2286. int err;
  2287. if (threadio == NULL)
  2288. return 0;
  2289. threadio->stop = true;
  2290. /* Wake up all pending sleeps in the thread */
  2291. vb2_queue_error(q);
  2292. err = kthread_stop(threadio->thread);
  2293. __vb2_cleanup_fileio(q);
  2294. threadio->thread = NULL;
  2295. kfree(threadio);
  2296. q->threadio = NULL;
  2297. return err;
  2298. }
  2299. EXPORT_SYMBOL_GPL(vb2_thread_stop);
  2300. MODULE_DESCRIPTION("Media buffer core framework");
  2301. MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
  2302. MODULE_LICENSE("GPL");