stm32-dcmi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for STM32 Digital Camera Memory Interface
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 *          for STMicroelectronics.
 *
 * This driver is based on atmel_isi.c
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-rect.h>
#include <media/videobuf2-dma-contig.h>

#define DRV_NAME "stm32-dcmi"

/* Registers offset for DCMI */
#define DCMI_CR 0x00 /* Control Register */
#define DCMI_SR 0x04 /* Status Register */
#define DCMI_RIS 0x08 /* Raw Interrupt Status register */
#define DCMI_IER 0x0C /* Interrupt Enable Register */
#define DCMI_MIS 0x10 /* Masked Interrupt Status register */
#define DCMI_ICR 0x14 /* Interrupt Clear Register */
#define DCMI_ESCR 0x18 /* Embedded Synchronization Code Register */
#define DCMI_ESUR 0x1C /* Embedded Synchronization Unmask Register */
#define DCMI_CWSTRT 0x20 /* Crop Window STaRT */
#define DCMI_CWSIZE 0x24 /* Crop Window SIZE */
#define DCMI_DR 0x28 /* Data Register */
#define DCMI_IDR 0x2C /* IDentifier Register */

/* Bits definition for control register (DCMI_CR) */
#define CR_CAPTURE BIT(0)
#define CR_CM BIT(1)
#define CR_CROP BIT(2)
#define CR_JPEG BIT(3)
#define CR_ESS BIT(4)
#define CR_PCKPOL BIT(5)
#define CR_HSPOL BIT(6)
#define CR_VSPOL BIT(7)
#define CR_FCRC_0 BIT(8)
#define CR_FCRC_1 BIT(9)
#define CR_EDM_0 BIT(10)
#define CR_EDM_1 BIT(11)
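/*
 * CR_EDM[1:0] select the parallel bus width captured per pixel clock:
 * 00 = 8-bit (default), 01 = 10-bit, 10 = 12-bit, 11 = 14-bit,
 * as programmed from the endpoint bus_width in dcmi_start_streaming().
 */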
#define CR_ENABLE BIT(14)

/* Bits definition for status register (DCMI_SR) */
#define SR_HSYNC BIT(0)
#define SR_VSYNC BIT(1)
#define SR_FNE BIT(2)

/*
 * Bits definition for interrupt registers
 * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
 */
#define IT_FRAME BIT(0)
#define IT_OVR BIT(1)
#define IT_ERR BIT(2)
#define IT_VSYNC BIT(3)
#define IT_LINE BIT(4)

enum state {
	STOPPED = 0,
	RUNNING,
	STOPPING,
};

#define MIN_WIDTH 16U
#define MAX_WIDTH 2048U
#define MIN_HEIGHT 16U
#define MAX_HEIGHT 2048U

#define MIN_JPEG_WIDTH 16U
#define MAX_JPEG_WIDTH 2592U
#define MIN_JPEG_HEIGHT 16U
#define MAX_JPEG_HEIGHT 2592U

#define TIMEOUT_MS 1000

struct dcmi_graph_entity {
	struct device_node *node;

	struct v4l2_async_subdev asd;
	struct v4l2_subdev *subdev;
};

struct dcmi_format {
	u32 fourcc;
	u32 mbus_code;
	u8 bpp;
};

struct dcmi_framesize {
	u32 width;
	u32 height;
};

struct dcmi_buf {
	struct vb2_v4l2_buffer vb;
	bool prepared;
	dma_addr_t paddr;
	size_t size;
	struct list_head list;
};

struct stm32_dcmi {
	/* Protects the access of variables shared within the interrupt */
	spinlock_t irqlock;
	struct device *dev;
	void __iomem *regs;
	struct resource *res;
	struct reset_control *rstc;
	int sequence;
	struct list_head buffers;
	struct dcmi_buf *active;

	struct v4l2_device v4l2_dev;
	struct video_device *vdev;
	struct v4l2_async_notifier notifier;
	struct dcmi_graph_entity entity;
	struct v4l2_format fmt;
	struct v4l2_rect crop;
	bool do_crop;

	const struct dcmi_format **sd_formats;
	unsigned int num_of_sd_formats;
	const struct dcmi_format *sd_format;
	struct dcmi_framesize *sd_framesizes;
	unsigned int num_of_sd_framesizes;
	struct dcmi_framesize sd_framesize;
	struct v4l2_rect sd_bounds;

	/* Protect this data structure */
	struct mutex lock;
	struct vb2_queue queue;

	struct v4l2_fwnode_bus_parallel bus;
	struct completion complete;
	struct clk *mclk;
	enum state state;
	struct dma_chan *dma_chan;
	dma_cookie_t dma_cookie;
	u32 misr;
	int errors_count;
	int overrun_count;
	int buffers_count;
};

static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
{
	return container_of(n, struct stm32_dcmi, notifier);
}

static inline u32 reg_read(void __iomem *base, u32 reg)
{
	return readl_relaxed(base + reg);
}

static inline void reg_write(void __iomem *base, u32 reg, u32 val)
{
	writel_relaxed(val, base + reg);
}

static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
{
	reg_write(base, reg, reg_read(base, reg) | mask);
}

static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
{
	reg_write(base, reg, reg_read(base, reg) & ~mask);
}

static int dcmi_start_capture(struct stm32_dcmi *dcmi);

static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
			     struct dcmi_buf *buf,
			     size_t bytesused,
			     int err)
{
	struct vb2_v4l2_buffer *vbuf;

	if (!buf)
		return;

	vbuf = &buf->vb;

	vbuf->sequence = dcmi->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
		vbuf->vb2_buf.index, vbuf->sequence, bytesused);

	dcmi->buffers_count++;
	dcmi->active = NULL;
}
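
/*
 * Pick the next queued buffer (if any) as the active one and relaunch a
 * capture. Called once a frame has been completed; returns -EINVAL if
 * streaming is not running or if no buffer is left in the queue.
 */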
static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
{
	spin_lock_irq(&dcmi->irqlock);

	if (dcmi->state != RUNNING) {
		spin_unlock_irq(&dcmi->irqlock);
		return -EINVAL;
	}

	/* Restart a new DMA transfer with next buffer */
	if (list_empty(&dcmi->buffers)) {
		dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer\n",
			__func__);
		dcmi->errors_count++;
		dcmi->active = NULL;

		spin_unlock_irq(&dcmi->irqlock);
		return -EINVAL;
	}

	dcmi->active = list_entry(dcmi->buffers.next,
				  struct dcmi_buf, list);
	list_del_init(&dcmi->active->list);

	spin_unlock_irq(&dcmi->irqlock);

	return dcmi_start_capture(dcmi);
}
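
/*
 * DMA completion callback, invoked by the dmaengine framework once the
 * transfer descriptor of the active buffer has been processed: hand the
 * buffer back to vb2 and chain the next capture.
 */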
static void dcmi_dma_callback(void *param)
{
	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
	struct dma_chan *chan = dcmi->dma_chan;
	struct dma_tx_state state;
	enum dma_status status;
	struct dcmi_buf *buf = dcmi->active;

	/* Check DMA status */
	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);

	switch (status) {
	case DMA_IN_PROGRESS:
		dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
		break;
	case DMA_PAUSED:
		dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
		break;
	case DMA_ERROR:
		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
		break;
	case DMA_COMPLETE:
		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);

		/* Return buffer to V4L2 */
		dcmi_buffer_done(dcmi, buf, buf->size, 0);

		/* Restart capture */
		if (dcmi_restart_capture(dcmi))
			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
				__func__);
		break;
	default:
		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
		break;
	}
}

static int dcmi_start_dma(struct stm32_dcmi *dcmi,
			  struct dcmi_buf *buf)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_slave_config config;
	int ret;

	memset(&config, 0, sizeof(config));

	config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_maxburst = 4;

	/* Configure DMA channel */
	ret = dmaengine_slave_config(dcmi->dma_chan, &config);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Prepare a DMA transaction */
	desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
					   buf->size,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
			__func__, buf->size);
		return -EINVAL;
	}

	/* Set completion callback routine for notification */
	desc->callback = dcmi_dma_callback;
	desc->callback_param = dcmi;

	/* Push current DMA transaction in the pending queue */
	dcmi->dma_cookie = dmaengine_submit(desc);
	if (dma_submit_error(dcmi->dma_cookie)) {
		dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
		return -ENXIO;
	}

	dma_async_issue_pending(dcmi->dma_chan);

	return 0;
}

static int dcmi_start_capture(struct stm32_dcmi *dcmi)
{
	int ret;
	struct dcmi_buf *buf = dcmi->active;

	if (!buf)
		return -EINVAL;

	ret = dcmi_start_dma(dcmi, buf);
	if (ret) {
		dcmi->errors_count++;
		return ret;
	}

	/* Enable capture */
	reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);

	return 0;
}

static void dcmi_set_crop(struct stm32_dcmi *dcmi)
{
	u32 size, start;

	/* Crop resolution */
	size = ((dcmi->crop.height - 1) << 16) |
	       ((dcmi->crop.width << 1) - 1);
	reg_write(dcmi->regs, DCMI_CWSIZE, size);

	/* Crop start point */
	start = ((dcmi->crop.top) << 16) |
		((dcmi->crop.left << 1));
	reg_write(dcmi->regs, DCMI_CWSTRT, start);

	dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
		dcmi->crop.width, dcmi->crop.height,
		dcmi->crop.left, dcmi->crop.top);

	/* Enable crop */
	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
}

static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
{
	struct dma_tx_state state;
	enum dma_status status;
	struct dma_chan *chan = dcmi->dma_chan;
	struct dcmi_buf *buf = dcmi->active;

	if (!buf)
		return;

	/*
	 * Because the JPEG size sent by the sensor varies from frame to
	 * frame, the DMA transfer never completes: the programmed transfer
	 * size is never reached.
	 * To ensure that all the JPEG data has been transferred into the
	 * active buffer memory, the DMA is drained. The DMA tx status then
	 * gives the amount of data actually transferred to memory, which is
	 * returned to V4L2 through the active buffer payload.
	 */

	/* Drain DMA */
	dmaengine_synchronize(chan);

	/* Get DMA residue to get JPEG size */
	status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
	if (status != DMA_ERROR && state.residue < buf->size) {
		/* Return JPEG buffer to V4L2 with received JPEG buffer size */
		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
	} else {
		dcmi->errors_count++;
		dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
			__func__);
		/* Return JPEG buffer to V4L2 in ERROR state */
		dcmi_buffer_done(dcmi, buf, 0, -EIO);
	}

	/* Abort DMA operation */
	dmaengine_terminate_all(dcmi->dma_chan);

	/* Restart capture */
	if (dcmi_restart_capture(dcmi))
		dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
			__func__);
}
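
/*
 * Threaded half of the DCMI interrupt: handles stop-streaming completion,
 * error/overrun accounting and JPEG end-of-frame, based on the masked
 * interrupt status latched by dcmi_irq_callback() below.
 */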
static irqreturn_t dcmi_irq_thread(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;

	spin_lock_irq(&dcmi->irqlock);

	/* Stop capture is required */
	if (dcmi->state == STOPPING) {
		reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

		dcmi->state = STOPPED;

		complete(&dcmi->complete);

		spin_unlock_irq(&dcmi->irqlock);
		return IRQ_HANDLED;
	}

	if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
		dcmi->errors_count++;
		if (dcmi->misr & IT_OVR)
			dcmi->overrun_count++;
	}

	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
	    dcmi->misr & IT_FRAME) {
		/* JPEG received */
		spin_unlock_irq(&dcmi->irqlock);
		dcmi_process_jpeg(dcmi);
		return IRQ_HANDLED;
	}

	spin_unlock_irq(&dcmi->irqlock);
	return IRQ_HANDLED;
}

static irqreturn_t dcmi_irq_callback(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;
	unsigned long flags;

	spin_lock_irqsave(&dcmi->irqlock, flags);

	dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);

	/* Clear interrupt */
	reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);

	spin_unlock_irqrestore(&dcmi->irqlock, flags);

	return IRQ_WAKE_THREAD;
}

static int dcmi_queue_setup(struct vb2_queue *vq,
			    unsigned int *nbuffers,
			    unsigned int *nplanes,
			    unsigned int sizes[],
			    struct device *alloc_devs[])
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	unsigned int size;

	size = dcmi->fmt.fmt.pix.sizeimage;

	/* Make sure the image size is large enough */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	dcmi->active = NULL;

	dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
		*nbuffers, size);

	return 0;
}

static int dcmi_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

	INIT_LIST_HEAD(&buf->list);

	return 0;
}

static int dcmi_buf_prepare(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
	unsigned long size;

	size = dcmi->fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->paddr =
			vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
			vb->index, &buf->paddr, buf->size);
	}

	return 0;
}

static void dcmi_buf_queue(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

	spin_lock_irq(&dcmi->irqlock);

	if (dcmi->state == RUNNING && !dcmi->active) {
		dcmi->active = buf;

		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
			buf->vb.vb2_buf.index);

		spin_unlock_irq(&dcmi->irqlock);
		if (dcmi_start_capture(dcmi))
			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
				__func__);
	} else {
		/* Enqueue to video buffers list */
		list_add_tail(&buf->list, &dcmi->buffers);
		spin_unlock_irq(&dcmi->irqlock);
	}
}

static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	u32 val = 0;
	int ret;

	ret = clk_enable(dcmi->mclk);
	if (ret) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock\n",
			__func__);
		goto err_release_buffers;
	}

	/* Enable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error\n",
			__func__);
		goto err_disable_clock;
	}

	spin_lock_irq(&dcmi->irqlock);

	/* Set bus width */
	switch (dcmi->bus.bus_width) {
	case 14:
		val |= CR_EDM_0 | CR_EDM_1;
		break;
	case 12:
		val |= CR_EDM_1;
		break;
	case 10:
		val |= CR_EDM_0;
		break;
	default:
		/* Set bus width to 8 bits by default */
		break;
	}

	/* Set vertical synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
		val |= CR_VSPOL;

	/* Set horizontal synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
		val |= CR_HSPOL;

	/* Set pixel clock polarity */
	if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
		val |= CR_PCKPOL;

	reg_write(dcmi->regs, DCMI_CR, val);

	/* Set crop */
	if (dcmi->do_crop)
		dcmi_set_crop(dcmi);

	/* Enable jpeg capture */
	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
		reg_set(dcmi->regs, DCMI_CR, CR_CM); /* Snapshot mode */

	/* Enable dcmi */
	reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);

	dcmi->state = RUNNING;

	dcmi->sequence = 0;
	dcmi->errors_count = 0;
	dcmi->overrun_count = 0;
	dcmi->buffers_count = 0;
	dcmi->active = NULL;

	/*
	 * Start transfer if at least one buffer has been queued,
	 * otherwise transfer is deferred at buffer queueing
	 */
	if (list_empty(&dcmi->buffers)) {
		dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
		spin_unlock_irq(&dcmi->irqlock);
		return 0;
	}

	dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
	list_del_init(&dcmi->active->list);

	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
	spin_unlock_irq(&dcmi->irqlock);

	ret = dcmi_start_capture(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
			__func__);
		goto err_subdev_streamoff;
	}

	/* Enable interruptions */
	reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	return 0;

err_subdev_streamoff:
	v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);

err_disable_clock:
	clk_disable(dcmi->mclk);

err_release_buffers:
	spin_lock_irq(&dcmi->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	if (dcmi->active) {
		buf = dcmi->active;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
		dcmi->active = NULL;
	}
	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irq(&dcmi->irqlock);

	return ret;
}

static void dcmi_stop_streaming(struct vb2_queue *vq)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
	long timeout;
	int ret;

	/* Disable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD)
		dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
			__func__, ret);

	spin_lock_irq(&dcmi->irqlock);
	dcmi->state = STOPPING;
	spin_unlock_irq(&dcmi->irqlock);

	timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
							    time_ms);

	spin_lock_irq(&dcmi->irqlock);

	/* Disable interruptions */
	reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	/* Disable DCMI */
	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);

	if (!timeout) {
		dev_err(dcmi->dev, "%s: Timeout during stop streaming\n",
			__func__);
		dcmi->state = STOPPED;
	}

	/* Return all queued buffers to vb2 in ERROR state */
	if (dcmi->active) {
		buf = dcmi->active;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dcmi->active = NULL;
	}

	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irq(&dcmi->irqlock);

	/* Stop all pending DMA operations */
	dmaengine_terminate_all(dcmi->dma_chan);

	clk_disable(dcmi->mclk);

	if (dcmi->errors_count)
		dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
			 dcmi->errors_count, dcmi->overrun_count,
			 dcmi->buffers_count);
	dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
		dcmi->errors_count, dcmi->overrun_count,
		dcmi->buffers_count);
}

static const struct vb2_ops dcmi_video_qops = {
	.queue_setup = dcmi_queue_setup,
	.buf_init = dcmi_buf_init,
	.buf_prepare = dcmi_buf_prepare,
	.buf_queue = dcmi_buf_queue,
	.start_streaming = dcmi_start_streaming,
	.stop_streaming = dcmi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	*fmt = dcmi->fmt;

	return 0;
}

static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
							unsigned int fourcc)
{
	unsigned int num_formats = dcmi->num_of_sd_formats;
	const struct dcmi_format *fmt;
	unsigned int i;

	for (i = 0; i < num_formats; i++) {
		fmt = dcmi->sd_formats[i];
		if (fmt->fourcc == fourcc)
			return fmt;
	}

	return NULL;
}
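
/*
 * Select the smallest sensor frame size that still fully contains the
 * requested resolution, so that the crop window can be applied on top of it.
 */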
static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
				    struct v4l2_pix_format *pix,
				    struct dcmi_framesize *framesize)
{
	struct dcmi_framesize *match = NULL;
	unsigned int i;
	unsigned int min_err = UINT_MAX;

	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		int w_err = (fsize->width - pix->width);
		int h_err = (fsize->height - pix->height);
		int err = w_err + h_err;

		if (w_err >= 0 && h_err >= 0 && err < min_err) {
			min_err = err;
			match = fsize;
		}
	}
	if (!match)
		match = &dcmi->sd_framesizes[0];

	*framesize = *match;
}

static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
			const struct dcmi_format **sd_format,
			struct dcmi_framesize *sd_framesize)
{
	const struct dcmi_format *sd_fmt;
	struct dcmi_framesize sd_fsize;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	bool do_crop;
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	/* Limit to hardware capabilities */
	if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
		pix->width = clamp(pix->width, MIN_JPEG_WIDTH, MAX_JPEG_WIDTH);
		pix->height =
			clamp(pix->height, MIN_JPEG_HEIGHT, MAX_JPEG_HEIGHT);
	} else {
		pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
		pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);
	}

	/* No crop if JPEG is requested */
	do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);

	if (do_crop && dcmi->num_of_sd_framesizes) {
		struct dcmi_framesize outer_sd_fsize;

		/*
		 * If crop is requested and sensor have discrete frame sizes,
		 * select the frame size that is just larger than request
		 */
		__find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
		pix->width = outer_sd_fsize.width;
		pix->height = outer_sd_fsize.height;
	}

	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	/* Update pix regarding to what sensor can do */
	v4l2_fill_pix_format(pix, &format.format);

	/* Save resolution that sensor can actually do */
	sd_fsize.width = pix->width;
	sd_fsize.height = pix->height;

	if (do_crop) {
		struct v4l2_rect c = dcmi->crop;
		struct v4l2_rect max_rect;

		/*
		 * Adjust crop by making the intersection between
		 * format resolution request and crop request
		 */
		max_rect.top = 0;
		max_rect.left = 0;
		max_rect.width = pix->width;
		max_rect.height = pix->height;
		v4l2_rect_map_inside(&c, &max_rect);
		c.top = clamp_t(s32, c.top, 0, pix->height - c.height);
		c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
		dcmi->crop = c;

		/* Adjust format resolution request to crop */
		pix->width = dcmi->crop.width;
		pix->height = dcmi->crop.height;
	}

	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * sd_fmt->bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	if (sd_format)
		*sd_format = sd_fmt;
	if (sd_framesize)
		*sd_framesize = sd_fsize;

	return 0;
}

static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
{
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct dcmi_format *sd_format;
	struct dcmi_framesize sd_framesize;
	struct v4l2_mbus_framefmt *mf = &format.format;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int ret;

	/*
	 * Try format, fmt.width/height could have been changed
	 * to match sensor capability or crop request
	 * sd_format & sd_framesize will contain what subdev
	 * can do for this request.
	 */
	ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
	if (ret)
		return ret;

	/* Disable crop if JPEG is requested */
	if (pix->pixelformat == V4L2_PIX_FMT_JPEG)
		dcmi->do_crop = false;

	/* pix to mbus format */
	v4l2_fill_mbus_format(mf, pix,
			      sd_format->mbus_code);
	mf->width = sd_framesize.width;
	mf->height = sd_framesize.height;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       set_fmt, NULL, &format);
	if (ret < 0)
		return ret;

	dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
		mf->code, mf->width, mf->height);
	dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
		(char *)&pix->pixelformat,
		pix->width, pix->height);

	dcmi->fmt = *f;
	dcmi->sd_format = sd_format;
	dcmi->sd_framesize = sd_framesize;

	return 0;
}

static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (vb2_is_streaming(&dcmi->queue))
		return -EBUSY;

	return dcmi_set_fmt(dcmi, f);
}

static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return dcmi_try_fmt(dcmi, f, NULL, NULL);
}

static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (f->index >= dcmi->num_of_sd_formats)
		return -EINVAL;

	f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
	return 0;
}

static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
				  struct v4l2_pix_format *pix)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	v4l2_fill_pix_format(pix, &fmt.format);

	return 0;
}

static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
				  struct v4l2_pix_format *pix)
{
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	struct v4l2_subdev_pad_config pad_cfg;
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	return 0;
}

static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
				  struct v4l2_rect *r)
{
	struct v4l2_subdev_selection bounds = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	unsigned int max_width, max_height, max_pixsize;
	struct v4l2_pix_format pix;
	unsigned int i;
	int ret;

	/*
	 * Get sensor bounds first
	 */
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
			       NULL, &bounds);
	if (!ret)
		*r = bounds.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	/*
	 * If selection is not implemented,
	 * fallback by enumerating sensor frame sizes
	 * and take the largest one
	 */
	max_width = 0;
	max_height = 0;
	max_pixsize = 0;
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		unsigned int pixsize = fsize->width * fsize->height;

		if (pixsize > max_pixsize) {
			max_pixsize = pixsize;
			max_width = fsize->width;
			max_height = fsize->height;
		}
	}
	if (max_pixsize > 0) {
		r->top = 0;
		r->left = 0;
		r->width = max_width;
		r->height = max_height;
		return 0;
	}

	/*
	 * If frame sizes enumeration is not implemented,
	 * fallback by getting current sensor frame size
	 */
	ret = dcmi_get_sensor_format(dcmi, &pix);
	if (ret)
		return ret;

	r->top = 0;
	r->left = 0;
	r->width = pix.width;
	r->height = pix.height;

	return 0;
}

static int dcmi_g_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r = dcmi->sd_bounds;
		return 0;
	case V4L2_SEL_TGT_CROP:
		if (dcmi->do_crop) {
			s->r = dcmi->crop;
		} else {
			s->r.top = 0;
			s->r.left = 0;
			s->r.width = dcmi->fmt.fmt.pix.width;
			s->r.height = dcmi->fmt.fmt.pix.height;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dcmi_s_selection(struct file *file, void *priv,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_pix_format pix;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	/* Reset sensor resolution to max resolution */
	pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
	pix.width = dcmi->sd_bounds.width;
	pix.height = dcmi->sd_bounds.height;
	dcmi_set_sensor_format(dcmi, &pix);

	/*
	 * Make the intersection between
	 * sensor resolution
	 * and crop request
	 */
	max_rect.top = 0;
	max_rect.left = 0;
	max_rect.width = pix.width;
	max_rect.height = pix.height;
	v4l2_rect_map_inside(&r, &max_rect);
	r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
	r.left = clamp_t(s32, r.left, 0, pix.width - r.width);

	if (!(r.top == dcmi->sd_bounds.top &&
	      r.left == dcmi->sd_bounds.left &&
	      r.width == dcmi->sd_bounds.width &&
	      r.height == dcmi->sd_bounds.height)) {
		/* Crop if request is different than sensor resolution */
		dcmi->do_crop = true;
		dcmi->crop = r;
		dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
			r.width, r.height, r.left, r.top,
			pix.width, pix.height);
	} else {
		/* Disable crop */
		dcmi->do_crop = false;
		dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
	}

	s->r = r;
	return 0;
}

static int dcmi_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
	strlcpy(cap->card, "STM32 Camera Memory Interface",
		sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
	return 0;
}

static int dcmi_enum_input(struct file *file, void *priv,
			   struct v4l2_input *i)
{
	if (i->index != 0)
		return -EINVAL;
	i->type = V4L2_INPUT_TYPE_CAMERA;
	strlcpy(i->name, "Camera", sizeof(i->name));
	return 0;
}

static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
{
	if (i > 0)
		return -EINVAL;
	return 0;
}

static int dcmi_enum_framesizes(struct file *file, void *fh,
				struct v4l2_frmsizeenum *fsize)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_frame_size_enum fse = {
		.index = fsize->index,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
	if (!sd_fmt)
		return -EINVAL;

	fse.code = sd_fmt->mbus_code;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
			       NULL, &fse);
	if (ret)
		return ret;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = fse.max_width;
	fsize->discrete.height = fse.max_height;

	return 0;
}

static int dcmi_g_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *p)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
}

static int dcmi_s_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *p)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
}

static int dcmi_enum_frameintervals(struct file *file, void *fh,
				    struct v4l2_frmivalenum *fival)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
	if (!sd_fmt)
		return -EINVAL;

	fie.code = sd_fmt->mbus_code;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       enum_frame_interval, NULL, &fie);
	if (ret)
		return ret;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}

static const struct of_device_id stm32_dcmi_of_match[] = {
	{ .compatible = "st,stm32-dcmi"},
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);

static int dcmi_open(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	int ret;

	if (mutex_lock_interruptible(&dcmi->lock))
		return -ERESTARTSYS;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		goto unlock;

	if (!v4l2_fh_is_singular_file(file))
		goto fh_rel;

	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto fh_rel;

	ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
	if (ret)
		v4l2_subdev_call(sd, core, s_power, 0);
fh_rel:
	if (ret)
		v4l2_fh_release(file);
unlock:
	mutex_unlock(&dcmi->lock);
	return ret;
}

static int dcmi_release(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	bool fh_singular;
	int ret;

	mutex_lock(&dcmi->lock);

	fh_singular = v4l2_fh_is_singular_file(file);

	ret = _vb2_fop_release(file, NULL);

	if (fh_singular)
		v4l2_subdev_call(sd, core, s_power, 0);

	mutex_unlock(&dcmi->lock);

	return ret;
}

static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
	.vidioc_querycap = dcmi_querycap,

	.vidioc_try_fmt_vid_cap = dcmi_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap,
	.vidioc_g_selection = dcmi_g_selection,
	.vidioc_s_selection = dcmi_s_selection,

	.vidioc_enum_input = dcmi_enum_input,
	.vidioc_g_input = dcmi_g_input,
	.vidioc_s_input = dcmi_s_input,

	.vidioc_g_parm = dcmi_g_parm,
	.vidioc_s_parm = dcmi_s_parm,

	.vidioc_enum_framesizes = dcmi_enum_framesizes,
	.vidioc_enum_frameintervals = dcmi_enum_frameintervals,

	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,

	.vidioc_log_status = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

static const struct v4l2_file_operations dcmi_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = dcmi_open,
	.release = dcmi_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
	.read = vb2_fop_read,
};

static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
{
	struct v4l2_format f = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.fmt.pix = {
			.width = CIF_WIDTH,
			.height = CIF_HEIGHT,
			.field = V4L2_FIELD_NONE,
			.pixelformat = dcmi->sd_formats[0]->fourcc,
		},
	};
	int ret;

	ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
	if (ret)
		return ret;

	dcmi->sd_format = dcmi->sd_formats[0];
	dcmi->fmt = f;
	return 0;
}

static const struct dcmi_format dcmi_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_JPEG,
		.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
		.bpp = 1,
	},
};
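
/*
 * Build the list of formats the driver will expose: keep only those DCMI
 * pixel formats whose media-bus code is also enumerated by the sensor.
 */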
static int dcmi_formats_init(struct stm32_dcmi *dcmi)
{
	const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
	unsigned int num_fmts = 0, i, j;
	struct v4l2_subdev *subdev = dcmi->entity.subdev;
	struct v4l2_subdev_mbus_code_enum mbus_code = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
				 NULL, &mbus_code)) {
		for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
			if (dcmi_formats[i].mbus_code != mbus_code.code)
				continue;

			/* Code supported, have we got this fourcc yet? */
			for (j = 0; j < num_fmts; j++)
				if (sd_fmts[j]->fourcc ==
						dcmi_formats[i].fourcc)
					/* Already available */
					break;
			if (j == num_fmts)
				/* New */
				sd_fmts[num_fmts++] = dcmi_formats + i;
		}
		mbus_code.index++;
	}

	if (!num_fmts)
		return -ENXIO;

	dcmi->num_of_sd_formats = num_fmts;
	dcmi->sd_formats = devm_kcalloc(dcmi->dev,
					num_fmts, sizeof(struct dcmi_format *),
					GFP_KERNEL);
	if (!dcmi->sd_formats) {
		dev_err(dcmi->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	memcpy(dcmi->sd_formats, sd_fmts,
	       num_fmts * sizeof(struct dcmi_format *));
	dcmi->sd_format = dcmi->sd_formats[0];

	return 0;
}

static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
{
	unsigned int num_fsize = 0;
	struct v4l2_subdev *subdev = dcmi->entity.subdev;
	struct v4l2_subdev_frame_size_enum fse = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.code = dcmi->sd_format->mbus_code,
	};
	unsigned int ret;
	unsigned int i;

	/* Allocate discrete framesizes array */
	while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
				 NULL, &fse))
		fse.index++;

	num_fsize = fse.index;
	if (!num_fsize)
		return 0;

	dcmi->num_of_sd_framesizes = num_fsize;
	dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
					   sizeof(struct dcmi_framesize),
					   GFP_KERNEL);
	if (!dcmi->sd_framesizes) {
		dev_err(dcmi->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	/* Fill array with sensor supported framesizes */
	dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		fse.index = i;
		ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
				       NULL, &fse);
		if (ret)
			return ret;
		dcmi->sd_framesizes[fse.index].width = fse.max_width;
		dcmi->sd_framesizes[fse.index].height = fse.max_height;
		dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
	}

	return 0;
}
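
/*
 * Called by the async notifier once the sensor subdev has been bound:
 * query its formats, frame sizes and bounds, set a default format and
 * finally register the video device.
 */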
static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
	int ret;

	dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
	ret = dcmi_formats_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "No supported mediabus format found\n");
		return ret;
	}

	ret = dcmi_framesizes_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not initialize framesizes\n");
		return ret;
	}

	ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
	if (ret) {
		dev_err(dcmi->dev, "Could not get sensor bounds\n");
		return ret;
	}

	ret = dcmi_set_default_fmt(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not set default format\n");
		return ret;
	}

	ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(dcmi->dev, "Failed to register video device\n");
		return ret;
	}

	dev_dbg(dcmi->dev, "Device registered as %s\n",
		video_device_node_name(dcmi->vdev));
	return 0;
}

static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
				     struct v4l2_subdev *sd,
				     struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));
	/* Checks internally whether vdev has been initialized */
	video_unregister_device(dcmi->vdev);
}

static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
				   struct v4l2_subdev *subdev,
				   struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);

	dcmi->entity.subdev = subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
	.bound = dcmi_graph_notify_bound,
	.unbind = dcmi_graph_notify_unbind,
	.complete = dcmi_graph_notify_complete,
};
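
/*
 * Walk the DT graph of the DCMI node and record the remote port parent of
 * its first endpoint as the single sensor subdev to be matched by fwnode.
 */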
static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
	struct device_node *ep = NULL;
	struct device_node *remote;

	while (1) {
		ep = of_graph_get_next_endpoint(node, ep);
		if (!ep)
			return -EINVAL;

		remote = of_graph_get_remote_port_parent(ep);
		if (!remote) {
			of_node_put(ep);
			return -EINVAL;
		}

		/* Remote node to connect */
		dcmi->entity.node = remote;
		dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
		dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
		return 0;
	}
}

static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
	struct v4l2_async_subdev **subdevs = NULL;
	int ret;

	/* Parse the graph to extract a list of subdevice DT nodes. */
	ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
	if (ret < 0) {
		dev_err(dcmi->dev, "Graph parsing failed\n");
		return ret;
	}

	/* Register the subdevices notifier. */
	subdevs = devm_kzalloc(dcmi->dev, sizeof(*subdevs), GFP_KERNEL);
	if (!subdevs) {
		of_node_put(dcmi->entity.node);
		return -ENOMEM;
	}

	subdevs[0] = &dcmi->entity.asd;

	dcmi->notifier.subdevs = subdevs;
	dcmi->notifier.num_subdevs = 1;
	dcmi->notifier.ops = &dcmi_graph_notify_ops;

	ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
	if (ret < 0) {
		dev_err(dcmi->dev, "Notifier registration failed\n");
		of_node_put(dcmi->entity.node);
		return ret;
	}

	return 0;
}

static int dcmi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match = NULL;
	struct v4l2_fwnode_endpoint ep;
	struct stm32_dcmi *dcmi;
	struct vb2_queue *q;
	struct dma_chan *chan;
	struct clk *mclk;
	int irq;
	int ret = 0;

	match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Could not find a match in devicetree\n");
		return -ENODEV;
	}

	dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
	if (!dcmi)
		return -ENOMEM;

	dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(dcmi->rstc)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		return -ENODEV;
	}

	/* Get bus characteristics from devicetree */
	np = of_graph_get_next_endpoint(np, NULL);
	if (!np) {
		dev_err(&pdev->dev, "Could not find the endpoint\n");
		of_node_put(np);
		return -ENODEV;
	}

	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
	if (ret) {
		dev_err(&pdev->dev, "Could not parse the endpoint\n");
		of_node_put(np);
		return -ENODEV;
	}

	if (ep.bus_type == V4L2_MBUS_CSI2) {
		dev_err(&pdev->dev, "CSI bus not supported\n");
		of_node_put(np);
		return -ENODEV;
	}

	dcmi->bus.flags = ep.bus.parallel.flags;
	dcmi->bus.bus_width = ep.bus.parallel.bus_width;
	dcmi->bus.data_shift = ep.bus.parallel.data_shift;

	of_node_put(np);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Could not get irq\n");
		return -ENODEV;
	}

	dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!dcmi->res) {
		dev_err(&pdev->dev, "Could not get resource\n");
		return -ENODEV;
	}

	dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
	if (IS_ERR(dcmi->regs)) {
		dev_err(&pdev->dev, "Could not map registers\n");
		return PTR_ERR(dcmi->regs);
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
					dcmi_irq_thread, IRQF_ONESHOT,
					dev_name(&pdev->dev), dcmi);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
		return -ENODEV;
	}

	mclk = devm_clk_get(&pdev->dev, "mclk");
	if (IS_ERR(mclk)) {
		dev_err(&pdev->dev, "Unable to get mclk\n");
		return PTR_ERR(mclk);
	}

	chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!chan) {
		dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
		return -EPROBE_DEFER;
	}

	ret = clk_prepare(mclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
		goto err_dma_release;
	}

	spin_lock_init(&dcmi->irqlock);
	mutex_init(&dcmi->lock);
	init_completion(&dcmi->complete);
	INIT_LIST_HEAD(&dcmi->buffers);

	dcmi->dev = &pdev->dev;
	dcmi->mclk = mclk;
	dcmi->state = STOPPED;
	dcmi->dma_chan = chan;

	q = &dcmi->queue;

	/* Initialize the top-level structure */
	ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
	if (ret)
		goto err_clk_unprepare;

	dcmi->vdev = video_device_alloc();
	if (!dcmi->vdev) {
		ret = -ENOMEM;
		goto err_device_unregister;
	}

	/* Video node */
	dcmi->vdev->fops = &dcmi_fops;
	dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
	dcmi->vdev->queue = &dcmi->queue;
	strlcpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
	dcmi->vdev->release = video_device_release;
	dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
	dcmi->vdev->lock = &dcmi->lock;
	dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
				  V4L2_CAP_READWRITE;
	video_set_drvdata(dcmi->vdev, dcmi);

	/* Buffer queue */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
	q->lock = &dcmi->lock;
	q->drv_priv = dcmi;
	q->buf_struct_size = sizeof(struct dcmi_buf);
	q->ops = &dcmi_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;
	q->dev = &pdev->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
		goto err_device_release;
	}

	ret = dcmi_graph_init(dcmi);
	if (ret < 0)
		goto err_device_release;

	/* Reset device */
	ret = reset_control_assert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to assert the reset line\n");
		goto err_device_release;
	}

	usleep_range(3000, 5000);

	ret = reset_control_deassert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert the reset line\n");
		goto err_device_release;
	}

	dev_info(&pdev->dev, "Probe done\n");

	platform_set_drvdata(pdev, dcmi);
	return 0;

err_device_release:
	video_device_release(dcmi->vdev);
err_device_unregister:
	v4l2_device_unregister(&dcmi->v4l2_dev);
err_clk_unprepare:
	clk_unprepare(dcmi->mclk);
err_dma_release:
	dma_release_channel(dcmi->dma_chan);

	return ret;
}

static int dcmi_remove(struct platform_device *pdev)
{
	struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);

	v4l2_async_notifier_unregister(&dcmi->notifier);
	v4l2_device_unregister(&dcmi->v4l2_dev);

	clk_unprepare(dcmi->mclk);
	dma_release_channel(dcmi->dma_chan);

	return 0;
}

static struct platform_driver stm32_dcmi_driver = {
	.probe = dcmi_probe,
	.remove = dcmi_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(stm32_dcmi_of_match),
	},
};

module_platform_driver(stm32_dcmi_driver);

MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("video");