stm32-dcmi.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for STM32 Digital Camera Memory Interface
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 *          for STMicroelectronics.
 *
 * This driver is based on atmel_isi.c
 *
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-rect.h>
#include <media/videobuf2-dma-contig.h>

#define DRV_NAME "stm32-dcmi"
/* Registers offset for DCMI */
#define DCMI_CR		0x00 /* Control Register */
#define DCMI_SR		0x04 /* Status Register */
#define DCMI_RIS	0x08 /* Raw Interrupt Status register */
#define DCMI_IER	0x0C /* Interrupt Enable Register */
#define DCMI_MIS	0x10 /* Masked Interrupt Status register */
#define DCMI_ICR	0x14 /* Interrupt Clear Register */
#define DCMI_ESCR	0x18 /* Embedded Synchronization Code Register */
#define DCMI_ESUR	0x1C /* Embedded Synchronization Unmask Register */
#define DCMI_CWSTRT	0x20 /* Crop Window STaRT */
#define DCMI_CWSIZE	0x24 /* Crop Window SIZE */
#define DCMI_DR		0x28 /* Data Register */
#define DCMI_IDR	0x2C /* IDentifier Register */

/* Bits definition for control register (DCMI_CR) */
#define CR_CAPTURE	BIT(0)
#define CR_CM		BIT(1)
#define CR_CROP		BIT(2)
#define CR_JPEG		BIT(3)
#define CR_ESS		BIT(4)
#define CR_PCKPOL	BIT(5)
#define CR_HSPOL	BIT(6)
#define CR_VSPOL	BIT(7)
#define CR_FCRC_0	BIT(8)
#define CR_FCRC_1	BIT(9)
#define CR_EDM_0	BIT(10)
#define CR_EDM_1	BIT(11)
#define CR_ENABLE	BIT(14)

/* Bits definition for status register (DCMI_SR) */
#define SR_HSYNC	BIT(0)
#define SR_VSYNC	BIT(1)
#define SR_FNE		BIT(2)

/*
 * Bits definition for interrupt registers
 * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
 */
#define IT_FRAME	BIT(0)
#define IT_OVR		BIT(1)
#define IT_ERR		BIT(2)
#define IT_VSYNC	BIT(3)
#define IT_LINE		BIT(4)
enum state {
	STOPPED = 0,
	WAIT_FOR_BUFFER,
	RUNNING,
};

#define MIN_WIDTH	16U
#define MAX_WIDTH	2592U
#define MIN_HEIGHT	16U
#define MAX_HEIGHT	2592U

#define TIMEOUT_MS	1000

struct dcmi_graph_entity {
	struct device_node *node;

	struct v4l2_async_subdev asd;
	struct v4l2_subdev *subdev;
};

struct dcmi_format {
	u32	fourcc;
	u32	mbus_code;
	u8	bpp;
};

struct dcmi_framesize {
	u32	width;
	u32	height;
};

struct dcmi_buf {
	struct vb2_v4l2_buffer	vb;
	bool			prepared;
	dma_addr_t		paddr;
	size_t			size;
	struct list_head	list;
};
struct stm32_dcmi {
	/* Protects the access of variables shared within the interrupt */
	spinlock_t			irqlock;
	struct device			*dev;
	void __iomem			*regs;
	struct resource			*res;
	struct reset_control		*rstc;
	int				sequence;
	struct list_head		buffers;
	struct dcmi_buf			*active;

	struct v4l2_device		v4l2_dev;
	struct video_device		*vdev;
	struct v4l2_async_notifier	notifier;
	struct dcmi_graph_entity	entity;
	struct v4l2_format		fmt;
	struct v4l2_rect		crop;
	bool				do_crop;

	const struct dcmi_format	**sd_formats;
	unsigned int			num_of_sd_formats;
	const struct dcmi_format	*sd_format;
	struct dcmi_framesize		*sd_framesizes;
	unsigned int			num_of_sd_framesizes;
	struct dcmi_framesize		sd_framesize;
	struct v4l2_rect		sd_bounds;

	/* Protect this data structure */
	struct mutex			lock;
	struct vb2_queue		queue;

	struct v4l2_fwnode_bus_parallel	bus;
	struct completion		complete;
	struct clk			*mclk;
	enum state			state;
	struct dma_chan			*dma_chan;
	dma_cookie_t			dma_cookie;
	u32				misr;
	int				errors_count;
	int				overrun_count;
	int				buffers_count;
};
static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
{
	return container_of(n, struct stm32_dcmi, notifier);
}

static inline u32 reg_read(void __iomem *base, u32 reg)
{
	return readl_relaxed(base + reg);
}

static inline void reg_write(void __iomem *base, u32 reg, u32 val)
{
	writel_relaxed(val, base + reg);
}

static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
{
	reg_write(base, reg, reg_read(base, reg) | mask);
}

static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
{
	reg_write(base, reg, reg_read(base, reg) & ~mask);
}
static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf);

static void dcmi_buffer_done(struct stm32_dcmi *dcmi,
			     struct dcmi_buf *buf,
			     size_t bytesused,
			     int err)
{
	struct vb2_v4l2_buffer *vbuf;

	if (!buf)
		return;

	list_del_init(&buf->list);

	vbuf = &buf->vb;

	vbuf->sequence = dcmi->sequence++;
	vbuf->field = V4L2_FIELD_NONE;
	vbuf->vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&vbuf->vb2_buf, 0, bytesused);
	vb2_buffer_done(&vbuf->vb2_buf,
			err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	dev_dbg(dcmi->dev, "buffer[%d] done seq=%d, bytesused=%zu\n",
		vbuf->vb2_buf.index, vbuf->sequence, bytesused);

	dcmi->buffers_count++;
	dcmi->active = NULL;
}

static int dcmi_restart_capture(struct stm32_dcmi *dcmi)
{
	struct dcmi_buf *buf;

	spin_lock_irq(&dcmi->irqlock);

	if (dcmi->state != RUNNING) {
		spin_unlock_irq(&dcmi->irqlock);
		return -EINVAL;
	}

	/* Restart a new DMA transfer with next buffer */
	if (list_empty(&dcmi->buffers)) {
		dev_dbg(dcmi->dev, "Capture restart is deferred to next buffer queueing\n");
		dcmi->state = WAIT_FOR_BUFFER;
		spin_unlock_irq(&dcmi->irqlock);
		return 0;
	}

	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
	dcmi->active = buf;

	spin_unlock_irq(&dcmi->irqlock);

	return dcmi_start_capture(dcmi, buf);
}
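
/*
 * Capture cycle: dcmi_start_capture() arms a DMA transfer for the active
 * buffer and sets CR_CAPTURE; when the transfer completes, the DMA engine
 * calls dcmi_dma_callback() below, which hands the buffer back to vb2 and
 * restarts capture with the next queued buffer, or defers the restart to
 * the next buffer queueing if the list is empty.
 */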
static void dcmi_dma_callback(void *param)
{
	struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
	struct dma_tx_state state;
	enum dma_status status;
	struct dcmi_buf *buf = dcmi->active;

	spin_lock_irq(&dcmi->irqlock);

	/* Check DMA status */
	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);

	switch (status) {
	case DMA_IN_PROGRESS:
		dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
		break;
	case DMA_PAUSED:
		dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
		break;
	case DMA_ERROR:
		dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);

		/* Return buffer to V4L2 in error state */
		dcmi_buffer_done(dcmi, buf, 0, -EIO);
		break;
	case DMA_COMPLETE:
		dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);

		/* Return buffer to V4L2 */
		dcmi_buffer_done(dcmi, buf, buf->size, 0);

		spin_unlock_irq(&dcmi->irqlock);

		/* Restart capture */
		if (dcmi_restart_capture(dcmi))
			dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete\n",
				__func__);
		return;
	default:
		dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
		break;
	}

	spin_unlock_irq(&dcmi->irqlock);
}
static int dcmi_start_dma(struct stm32_dcmi *dcmi,
			  struct dcmi_buf *buf)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_slave_config config;
	int ret;

	memset(&config, 0, sizeof(config));

	config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_maxburst = 4;

	/* Configure DMA channel */
	ret = dmaengine_slave_config(dcmi->dma_chan, &config);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
			__func__, ret);
		return ret;
	}

	/* Prepare a DMA transaction */
	desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
					   buf->size,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer phy=%pad size=%zu\n",
			__func__, &buf->paddr, buf->size);
		return -EINVAL;
	}

	/* Set completion callback routine for notification */
	desc->callback = dcmi_dma_callback;
	desc->callback_param = dcmi;

	/* Push current DMA transaction in the pending queue */
	dcmi->dma_cookie = dmaengine_submit(desc);
	if (dma_submit_error(dcmi->dma_cookie)) {
		dev_err(dcmi->dev, "%s: DMA submission failed\n", __func__);
		return -ENXIO;
	}

	dma_async_issue_pending(dcmi->dma_chan);

	return 0;
}
static int dcmi_start_capture(struct stm32_dcmi *dcmi, struct dcmi_buf *buf)
{
	int ret;

	if (!buf)
		return -EINVAL;

	ret = dcmi_start_dma(dcmi, buf);
	if (ret) {
		dcmi->errors_count++;
		return ret;
	}

	/* Enable capture */
	reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);

	return 0;
}
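
/*
 * Crop window programming: DCMI_CWSIZE carries (height - 1) in its upper
 * half and the horizontal capture count in its lower half, DCMI_CWSTRT the
 * matching start coordinates. The horizontal values are doubled because the
 * non-JPEG formats supported here use two bytes per pixel on the 8-bit bus,
 * so the window is counted in pixel clocks rather than in pixels.
 */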
static void dcmi_set_crop(struct stm32_dcmi *dcmi)
{
	u32 size, start;

	/* Crop resolution */
	size = ((dcmi->crop.height - 1) << 16) |
		((dcmi->crop.width << 1) - 1);
	reg_write(dcmi->regs, DCMI_CWSIZE, size);

	/* Crop start point */
	start = ((dcmi->crop.top) << 16) |
		((dcmi->crop.left << 1));
	reg_write(dcmi->regs, DCMI_CWSTRT, start);

	dev_dbg(dcmi->dev, "Cropping to %ux%u@%u:%u\n",
		dcmi->crop.width, dcmi->crop.height,
		dcmi->crop.left, dcmi->crop.top);

	/* Enable crop */
	reg_set(dcmi->regs, DCMI_CR, CR_CROP);
}
static void dcmi_process_jpeg(struct stm32_dcmi *dcmi)
{
	struct dma_tx_state state;
	enum dma_status status;
	struct dcmi_buf *buf = dcmi->active;

	if (!buf)
		return;

	/*
	 * Because of variable JPEG buffer size sent by sensor,
	 * DMA transfer never completes due to transfer size never reached.
	 * In order to ensure that all the JPEG data are transferred
	 * in active buffer memory, DMA is drained.
	 * Then DMA tx status gives the amount of data transferred
	 * to memory, which is then returned to V4L2 through the active
	 * buffer payload.
	 */

	/* Drain DMA */
	dmaengine_synchronize(dcmi->dma_chan);

	/* Get DMA residue to get JPEG size */
	status = dmaengine_tx_status(dcmi->dma_chan, dcmi->dma_cookie, &state);
	if (status != DMA_ERROR && state.residue < buf->size) {
		/* Return JPEG buffer to V4L2 with received JPEG buffer size */
		dcmi_buffer_done(dcmi, buf, buf->size - state.residue, 0);
	} else {
		dcmi->errors_count++;
		dev_err(dcmi->dev, "%s: Cannot get JPEG size from DMA\n",
			__func__);
		/* Return JPEG buffer to V4L2 in ERROR state */
		dcmi_buffer_done(dcmi, buf, 0, -EIO);
	}

	/* Abort DMA operation */
	dmaengine_terminate_all(dcmi->dma_chan);

	/* Restart capture */
	if (dcmi_restart_capture(dcmi))
		dev_err(dcmi->dev, "%s: Cannot restart capture on JPEG received\n",
			__func__);
}
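
/*
 * Interrupt handling is split in two: the hard handler (dcmi_irq_callback)
 * latches the masked interrupt status into dcmi->misr and clears the flags,
 * while the threaded handler (dcmi_irq_thread) accounts for errors and
 * overruns and finishes JPEG frames, since dmaengine_synchronize() used in
 * dcmi_process_jpeg() may sleep.
 */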
static irqreturn_t dcmi_irq_thread(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;

	spin_lock_irq(&dcmi->irqlock);

	if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
		dcmi->errors_count++;
		if (dcmi->misr & IT_OVR)
			dcmi->overrun_count++;
	}

	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG &&
	    dcmi->misr & IT_FRAME) {
		/* JPEG received */
		spin_unlock_irq(&dcmi->irqlock);
		dcmi_process_jpeg(dcmi);
		return IRQ_HANDLED;
	}

	spin_unlock_irq(&dcmi->irqlock);
	return IRQ_HANDLED;
}

static irqreturn_t dcmi_irq_callback(int irq, void *arg)
{
	struct stm32_dcmi *dcmi = arg;
	unsigned long flags;

	spin_lock_irqsave(&dcmi->irqlock, flags);

	dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);

	/* Clear interrupt */
	reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);

	spin_unlock_irqrestore(&dcmi->irqlock, flags);

	return IRQ_WAKE_THREAD;
}
static int dcmi_queue_setup(struct vb2_queue *vq,
			    unsigned int *nbuffers,
			    unsigned int *nplanes,
			    unsigned int sizes[],
			    struct device *alloc_devs[])
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	unsigned int size;

	size = dcmi->fmt.fmt.pix.sizeimage;

	/* Make sure the image size is large enough */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
		*nbuffers, size);

	return 0;
}

static int dcmi_buf_init(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

	INIT_LIST_HEAD(&buf->list);

	return 0;
}

static int dcmi_buf_prepare(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
	unsigned long size;

	size = dcmi->fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	if (!buf->prepared) {
		/* Get memory addresses */
		buf->paddr =
			vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
		buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
		buf->prepared = true;

		vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);

		dev_dbg(dcmi->dev, "buffer[%d] phy=%pad size=%zu\n",
			vb->index, &buf->paddr, buf->size);
	}

	return 0;
}
static void dcmi_buf_queue(struct vb2_buffer *vb)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);

	spin_lock_irq(&dcmi->irqlock);

	/* Enqueue to video buffers list */
	list_add_tail(&buf->list, &dcmi->buffers);

	if (dcmi->state == WAIT_FOR_BUFFER) {
		dcmi->state = RUNNING;
		dcmi->active = buf;

		dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
			buf->vb.vb2_buf.index);

		spin_unlock_irq(&dcmi->irqlock);
		if (dcmi_start_capture(dcmi, buf))
			dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
				__func__);
		return;
	}

	spin_unlock_irq(&dcmi->irqlock);
}
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	u32 val = 0;
	int ret;

	/* pm_runtime_get_sync() may return a positive value, only fail on error */
	ret = pm_runtime_get_sync(dcmi->dev);
	if (ret < 0) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync\n",
			__func__);
		goto err_release_buffers;
	}

	/* Enable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD) {
		dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error\n",
			__func__);
		goto err_pm_put;
	}

	spin_lock_irq(&dcmi->irqlock);

	/* Set bus width */
	switch (dcmi->bus.bus_width) {
	case 14:
		val |= CR_EDM_0 | CR_EDM_1;
		break;
	case 12:
		val |= CR_EDM_1;
		break;
	case 10:
		val |= CR_EDM_0;
		break;
	default:
		/* Set bus width to 8 bits by default */
		break;
	}

	/* Set vertical synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
		val |= CR_VSPOL;

	/* Set horizontal synchronization polarity */
	if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
		val |= CR_HSPOL;

	/* Set pixel clock polarity */
	if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
		val |= CR_PCKPOL;

	reg_write(dcmi->regs, DCMI_CR, val);

	/* Set crop */
	if (dcmi->do_crop)
		dcmi_set_crop(dcmi);

	/* Enable jpeg capture */
	if (dcmi->sd_format->fourcc == V4L2_PIX_FMT_JPEG)
		reg_set(dcmi->regs, DCMI_CR, CR_CM); /* Snapshot mode */

	/* Enable dcmi */
	reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);

	dcmi->sequence = 0;
	dcmi->errors_count = 0;
	dcmi->overrun_count = 0;
	dcmi->buffers_count = 0;

	/*
	 * Start transfer if at least one buffer has been queued,
	 * otherwise transfer is deferred at buffer queueing
	 */
	if (list_empty(&dcmi->buffers)) {
		dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
		dcmi->state = WAIT_FOR_BUFFER;
		spin_unlock_irq(&dcmi->irqlock);
		return 0;
	}

	buf = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
	dcmi->active = buf;

	dcmi->state = RUNNING;

	dev_dbg(dcmi->dev, "Start streaming, starting capture\n");

	spin_unlock_irq(&dcmi->irqlock);

	ret = dcmi_start_capture(dcmi, buf);
	if (ret) {
		dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture\n",
			__func__);
		goto err_subdev_streamoff;
	}

	/* Enable interruptions */
	reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	return 0;

err_subdev_streamoff:
	v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);

err_pm_put:
	pm_runtime_put(dcmi->dev);

err_release_buffers:
	spin_lock_irq(&dcmi->irqlock);
	/*
	 * Return all buffers to vb2 in QUEUED state.
	 * This will give ownership back to userspace
	 */
	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	dcmi->active = NULL;
	spin_unlock_irq(&dcmi->irqlock);

	return ret;
}
static void dcmi_stop_streaming(struct vb2_queue *vq)
{
	struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
	struct dcmi_buf *buf, *node;
	int ret;

	/* Disable stream on the sub device */
	ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD)
		dev_err(dcmi->dev, "%s: Failed to stop streaming, subdev streamoff error (%d)\n",
			__func__, ret);

	spin_lock_irq(&dcmi->irqlock);

	/* Disable interruptions */
	reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);

	/* Disable DCMI */
	reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);

	/* Return all queued buffers to vb2 in ERROR state */
	list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
		list_del_init(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}

	dcmi->active = NULL;
	dcmi->state = STOPPED;

	spin_unlock_irq(&dcmi->irqlock);

	/* Stop all pending DMA operations */
	dmaengine_terminate_all(dcmi->dma_chan);

	pm_runtime_put(dcmi->dev);

	if (dcmi->errors_count)
		dev_warn(dcmi->dev, "Some errors found while streaming: errors=%d (overrun=%d), buffers=%d\n",
			 dcmi->errors_count, dcmi->overrun_count,
			 dcmi->buffers_count);
	dev_dbg(dcmi->dev, "Stop streaming, errors=%d (overrun=%d), buffers=%d\n",
		dcmi->errors_count, dcmi->overrun_count,
		dcmi->buffers_count);
}

static const struct vb2_ops dcmi_video_qops = {
	.queue_setup		= dcmi_queue_setup,
	.buf_init		= dcmi_buf_init,
	.buf_prepare		= dcmi_buf_prepare,
	.buf_queue		= dcmi_buf_queue,
	.start_streaming	= dcmi_start_streaming,
	.stop_streaming		= dcmi_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	*fmt = dcmi->fmt;

	return 0;
}

static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
							unsigned int fourcc)
{
	unsigned int num_formats = dcmi->num_of_sd_formats;
	const struct dcmi_format *fmt;
	unsigned int i;

	for (i = 0; i < num_formats; i++) {
		fmt = dcmi->sd_formats[i];
		if (fmt->fourcc == fourcc)
			return fmt;
	}

	return NULL;
}

static void __find_outer_frame_size(struct stm32_dcmi *dcmi,
				    struct v4l2_pix_format *pix,
				    struct dcmi_framesize *framesize)
{
	struct dcmi_framesize *match = NULL;
	unsigned int i;
	unsigned int min_err = UINT_MAX;

	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		int w_err = (fsize->width - pix->width);
		int h_err = (fsize->height - pix->height);
		int err = w_err + h_err;

		if (w_err >= 0 && h_err >= 0 && err < min_err) {
			min_err = err;
			match = fsize;
		}
	}
	if (!match)
		match = &dcmi->sd_framesizes[0];

	*framesize = *match;
}
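
/*
 * Format negotiation: dcmi_try_fmt() clamps the request to the hardware
 * limits, asks the sensor what it can do through a TRY set_fmt, then, when
 * cropping is active, picks the smallest discrete sensor frame size that
 * still encloses the request and shrinks the reported resolution to the
 * crop rectangle before computing bytesperline and sizeimage.
 */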
static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
			const struct dcmi_format **sd_format,
			struct dcmi_framesize *sd_framesize)
{
	const struct dcmi_format *sd_fmt;
	struct dcmi_framesize sd_fsize;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	bool do_crop;
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	/* Limit to hardware capabilities */
	pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH);
	pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT);

	/* No crop if JPEG is requested */
	do_crop = dcmi->do_crop && (pix->pixelformat != V4L2_PIX_FMT_JPEG);

	if (do_crop && dcmi->num_of_sd_framesizes) {
		struct dcmi_framesize outer_sd_fsize;

		/*
		 * If crop is requested and the sensor has discrete frame
		 * sizes, select the frame size that is just larger than
		 * the request
		 */
		__find_outer_frame_size(dcmi, pix, &outer_sd_fsize);
		pix->width = outer_sd_fsize.width;
		pix->height = outer_sd_fsize.height;
	}

	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	/* Update pix with what the sensor can do */
	v4l2_fill_pix_format(pix, &format.format);

	/* Save resolution that the sensor can actually do */
	sd_fsize.width = pix->width;
	sd_fsize.height = pix->height;

	if (do_crop) {
		struct v4l2_rect c = dcmi->crop;
		struct v4l2_rect max_rect;

		/*
		 * Adjust crop by making the intersection between
		 * format resolution request and crop request
		 */
		max_rect.top = 0;
		max_rect.left = 0;
		max_rect.width = pix->width;
		max_rect.height = pix->height;
		v4l2_rect_map_inside(&c, &max_rect);
		c.top = clamp_t(s32, c.top, 0, pix->height - c.height);
		c.left = clamp_t(s32, c.left, 0, pix->width - c.width);
		dcmi->crop = c;

		/* Adjust format resolution request to crop */
		pix->width = dcmi->crop.width;
		pix->height = dcmi->crop.height;
	}

	pix->field = V4L2_FIELD_NONE;
	pix->bytesperline = pix->width * sd_fmt->bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	if (sd_format)
		*sd_format = sd_fmt;
	if (sd_framesize)
		*sd_framesize = sd_fsize;

	return 0;
}
static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
{
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct dcmi_format *sd_format;
	struct dcmi_framesize sd_framesize;
	struct v4l2_mbus_framefmt *mf = &format.format;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int ret;

	/*
	 * Try format, fmt.width/height could have been changed
	 * to match sensor capability or crop request
	 * sd_format & sd_framesize will contain what subdev
	 * can do for this request.
	 */
	ret = dcmi_try_fmt(dcmi, f, &sd_format, &sd_framesize);
	if (ret)
		return ret;

	/* Disable crop if JPEG is requested */
	if (pix->pixelformat == V4L2_PIX_FMT_JPEG)
		dcmi->do_crop = false;

	/* pix to mbus format */
	v4l2_fill_mbus_format(mf, pix,
			      sd_format->mbus_code);
	mf->width = sd_framesize.width;
	mf->height = sd_framesize.height;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       set_fmt, NULL, &format);
	if (ret < 0)
		return ret;

	dev_dbg(dcmi->dev, "Sensor format set to 0x%x %ux%u\n",
		mf->code, mf->width, mf->height);
	dev_dbg(dcmi->dev, "Buffer format set to %4.4s %ux%u\n",
		(char *)&pix->pixelformat,
		pix->width, pix->height);

	dcmi->fmt = *f;
	dcmi->sd_format = sd_format;
	dcmi->sd_framesize = sd_framesize;

	return 0;
}

static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (vb2_is_streaming(&dcmi->queue))
		return -EBUSY;

	return dcmi_set_fmt(dcmi, f);
}

static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return dcmi_try_fmt(dcmi, f, NULL, NULL);
}

static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (f->index >= dcmi->num_of_sd_formats)
		return -EINVAL;

	f->pixelformat = dcmi->sd_formats[f->index]->fourcc;
	return 0;
}
static int dcmi_get_sensor_format(struct stm32_dcmi *dcmi,
				  struct v4l2_pix_format *pix)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	v4l2_fill_pix_format(pix, &fmt.format);

	return 0;
}

static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
				  struct v4l2_pix_format *pix)
{
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	struct v4l2_subdev_pad_config pad_cfg;
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
	if (!sd_fmt) {
		sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
		pix->pixelformat = sd_fmt->fourcc;
	}

	v4l2_fill_mbus_format(&format.format, pix, sd_fmt->mbus_code);
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
			       &pad_cfg, &format);
	if (ret < 0)
		return ret;

	return 0;
}

static int dcmi_get_sensor_bounds(struct stm32_dcmi *dcmi,
				  struct v4l2_rect *r)
{
	struct v4l2_subdev_selection bounds = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_CROP_BOUNDS,
	};
	unsigned int max_width, max_height, max_pixsize;
	struct v4l2_pix_format pix;
	unsigned int i;
	int ret;

	/*
	 * Get sensor bounds first
	 */
	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, get_selection,
			       NULL, &bounds);
	if (!ret)
		*r = bounds.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	/*
	 * If selection is not implemented,
	 * fall back to enumerating the sensor frame sizes
	 * and taking the largest one
	 */
	max_width = 0;
	max_height = 0;
	max_pixsize = 0;
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		struct dcmi_framesize *fsize = &dcmi->sd_framesizes[i];
		unsigned int pixsize = fsize->width * fsize->height;

		if (pixsize > max_pixsize) {
			max_pixsize = pixsize;
			max_width = fsize->width;
			max_height = fsize->height;
		}
	}
	if (max_pixsize > 0) {
		r->top = 0;
		r->left = 0;
		r->width = max_width;
		r->height = max_height;
		return 0;
	}

	/*
	 * If frame sizes enumeration is not implemented,
	 * fall back to the current sensor frame size
	 */
	ret = dcmi_get_sensor_format(dcmi, &pix);
	if (ret)
		return ret;

	r->top = 0;
	r->left = 0;
	r->width = pix.width;
	r->height = pix.height;

	return 0;
}
static int dcmi_g_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r = dcmi->sd_bounds;
		return 0;
	case V4L2_SEL_TGT_CROP:
		if (dcmi->do_crop) {
			s->r = dcmi->crop;
		} else {
			s->r.top = 0;
			s->r.left = 0;
			s->r.width = dcmi->fmt.fmt.pix.width;
			s->r.height = dcmi->fmt.fmt.pix.height;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dcmi_s_selection(struct file *file, void *priv,
			    struct v4l2_selection *s)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_pix_format pix;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	/* Reset sensor resolution to max resolution */
	pix.pixelformat = dcmi->fmt.fmt.pix.pixelformat;
	pix.width = dcmi->sd_bounds.width;
	pix.height = dcmi->sd_bounds.height;
	dcmi_set_sensor_format(dcmi, &pix);

	/*
	 * Make the intersection between
	 * sensor resolution
	 * and crop request
	 */
	max_rect.top = 0;
	max_rect.left = 0;
	max_rect.width = pix.width;
	max_rect.height = pix.height;
	v4l2_rect_map_inside(&r, &max_rect);
	r.top = clamp_t(s32, r.top, 0, pix.height - r.height);
	r.left = clamp_t(s32, r.left, 0, pix.width - r.width);

	if (!(r.top == dcmi->sd_bounds.top &&
	      r.left == dcmi->sd_bounds.left &&
	      r.width == dcmi->sd_bounds.width &&
	      r.height == dcmi->sd_bounds.height)) {
		/* Crop if request is different than sensor resolution */
		dcmi->do_crop = true;
		dcmi->crop = r;
		dev_dbg(dcmi->dev, "s_selection: crop %ux%u@(%u,%u) from %ux%u\n",
			r.width, r.height, r.left, r.top,
			pix.width, pix.height);
	} else {
		/* Disable crop */
		dcmi->do_crop = false;
		dev_dbg(dcmi->dev, "s_selection: crop is disabled\n");
	}

	s->r = r;
	return 0;
}
static int dcmi_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
	strlcpy(cap->card, "STM32 Camera Memory Interface",
		sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
	return 0;
}

static int dcmi_enum_input(struct file *file, void *priv,
			   struct v4l2_input *i)
{
	if (i->index != 0)
		return -EINVAL;

	i->type = V4L2_INPUT_TYPE_CAMERA;
	strlcpy(i->name, "Camera", sizeof(i->name));
	return 0;
}

static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
{
	if (i > 0)
		return -EINVAL;
	return 0;
}

static int dcmi_enum_framesizes(struct file *file, void *fh,
				struct v4l2_frmsizeenum *fsize)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_frame_size_enum fse = {
		.index = fsize->index,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
	if (!sd_fmt)
		return -EINVAL;

	fse.code = sd_fmt->mbus_code;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
			       NULL, &fse);
	if (ret)
		return ret;

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = fse.max_width;
	fsize->discrete.height = fse.max_height;

	return 0;
}

static int dcmi_g_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *p)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return v4l2_g_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
}

static int dcmi_s_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *p)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);

	return v4l2_s_parm_cap(video_devdata(file), dcmi->entity.subdev, p);
}

static int dcmi_enum_frameintervals(struct file *file, void *fh,
				    struct v4l2_frmivalenum *fival)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	const struct dcmi_format *sd_fmt;
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	sd_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
	if (!sd_fmt)
		return -EINVAL;

	fie.code = sd_fmt->mbus_code;

	ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
			       enum_frame_interval, NULL, &fie);
	if (ret)
		return ret;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}
static const struct of_device_id stm32_dcmi_of_match[] = {
	{ .compatible = "st,stm32-dcmi"},
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);

static int dcmi_open(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	int ret;

	if (mutex_lock_interruptible(&dcmi->lock))
		return -ERESTARTSYS;

	ret = v4l2_fh_open(file);
	if (ret < 0)
		goto unlock;

	if (!v4l2_fh_is_singular_file(file))
		goto fh_rel;

	ret = v4l2_subdev_call(sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto fh_rel;

	ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
	if (ret)
		v4l2_subdev_call(sd, core, s_power, 0);
fh_rel:
	if (ret)
		v4l2_fh_release(file);
unlock:
	mutex_unlock(&dcmi->lock);
	return ret;
}

static int dcmi_release(struct file *file)
{
	struct stm32_dcmi *dcmi = video_drvdata(file);
	struct v4l2_subdev *sd = dcmi->entity.subdev;
	bool fh_singular;
	int ret;

	mutex_lock(&dcmi->lock);

	fh_singular = v4l2_fh_is_singular_file(file);

	ret = _vb2_fop_release(file, NULL);

	if (fh_singular)
		v4l2_subdev_call(sd, core, s_power, 0);

	mutex_unlock(&dcmi->lock);

	return ret;
}
static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
	.vidioc_querycap		= dcmi_querycap,

	.vidioc_try_fmt_vid_cap		= dcmi_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= dcmi_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= dcmi_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= dcmi_enum_fmt_vid_cap,
	.vidioc_g_selection		= dcmi_g_selection,
	.vidioc_s_selection		= dcmi_s_selection,

	.vidioc_enum_input		= dcmi_enum_input,
	.vidioc_g_input			= dcmi_g_input,
	.vidioc_s_input			= dcmi_s_input,

	.vidioc_g_parm			= dcmi_g_parm,
	.vidioc_s_parm			= dcmi_s_parm,

	.vidioc_enum_framesizes		= dcmi_enum_framesizes,
	.vidioc_enum_frameintervals	= dcmi_enum_frameintervals,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

static const struct v4l2_file_operations dcmi_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= dcmi_open,
	.release	= dcmi_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = vb2_fop_get_unmapped_area,
#endif
	.read		= vb2_fop_read,
};
static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
{
	struct v4l2_format f = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.fmt.pix = {
			.width		= CIF_WIDTH,
			.height		= CIF_HEIGHT,
			.field		= V4L2_FIELD_NONE,
			.pixelformat	= dcmi->sd_formats[0]->fourcc,
		},
	};
	int ret;

	ret = dcmi_try_fmt(dcmi, &f, NULL, NULL);
	if (ret)
		return ret;

	dcmi->sd_format = dcmi->sd_formats[0];
	dcmi->fmt = f;
	return 0;
}

static const struct dcmi_format dcmi_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
		.bpp = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_JPEG,
		.mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
		.bpp = 1,
	},
};
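
/*
 * Build the list of formats usable on this board: walk the media bus codes
 * advertised by the bound sensor and keep every entry of dcmi_formats[]
 * whose mbus code matches, without duplicating fourccs.
 */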
static int dcmi_formats_init(struct stm32_dcmi *dcmi)
{
	const struct dcmi_format *sd_fmts[ARRAY_SIZE(dcmi_formats)];
	unsigned int num_fmts = 0, i, j;
	struct v4l2_subdev *subdev = dcmi->entity.subdev;
	struct v4l2_subdev_mbus_code_enum mbus_code = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
				 NULL, &mbus_code)) {
		for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
			if (dcmi_formats[i].mbus_code != mbus_code.code)
				continue;

			/* Code supported, have we got this fourcc yet? */
			for (j = 0; j < num_fmts; j++)
				if (sd_fmts[j]->fourcc ==
						dcmi_formats[i].fourcc)
					/* Already available */
					break;
			if (j == num_fmts)
				/* New */
				sd_fmts[num_fmts++] = dcmi_formats + i;
		}
		mbus_code.index++;
	}

	if (!num_fmts)
		return -ENXIO;

	dcmi->num_of_sd_formats = num_fmts;
	dcmi->sd_formats = devm_kcalloc(dcmi->dev,
					num_fmts, sizeof(struct dcmi_format *),
					GFP_KERNEL);
	if (!dcmi->sd_formats) {
		dev_err(dcmi->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	memcpy(dcmi->sd_formats, sd_fmts,
	       num_fmts * sizeof(struct dcmi_format *));
	dcmi->sd_format = dcmi->sd_formats[0];

	return 0;
}

static int dcmi_framesizes_init(struct stm32_dcmi *dcmi)
{
	unsigned int num_fsize = 0;
	struct v4l2_subdev *subdev = dcmi->entity.subdev;
	struct v4l2_subdev_frame_size_enum fse = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.code = dcmi->sd_format->mbus_code,
	};
	unsigned int ret;
	unsigned int i;

	/* Allocate discrete framesizes array */
	while (!v4l2_subdev_call(subdev, pad, enum_frame_size,
				 NULL, &fse))
		fse.index++;

	num_fsize = fse.index;
	if (!num_fsize)
		return 0;

	dcmi->num_of_sd_framesizes = num_fsize;
	dcmi->sd_framesizes = devm_kcalloc(dcmi->dev, num_fsize,
					   sizeof(struct dcmi_framesize),
					   GFP_KERNEL);
	if (!dcmi->sd_framesizes) {
		dev_err(dcmi->dev, "Could not allocate memory\n");
		return -ENOMEM;
	}

	/* Fill array with sensor supported framesizes */
	dev_dbg(dcmi->dev, "Sensor supports %u frame sizes:\n", num_fsize);
	for (i = 0; i < dcmi->num_of_sd_framesizes; i++) {
		fse.index = i;
		ret = v4l2_subdev_call(subdev, pad, enum_frame_size,
				       NULL, &fse);
		if (ret)
			return ret;
		dcmi->sd_framesizes[fse.index].width = fse.max_width;
		dcmi->sd_framesizes[fse.index].height = fse.max_height;
		dev_dbg(dcmi->dev, "%ux%u\n", fse.max_width, fse.max_height);
	}

	return 0;
}
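
/*
 * v4l2-async flow: dcmi_graph_parse() records the remote sensor node found
 * through the OF graph, dcmi_graph_init() registers it with an async
 * notifier, .bound stores the subdev, and .complete below finishes the
 * bring-up (formats, frame sizes, bounds, default format) before
 * registering the video device.
 */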
static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
	int ret;

	dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
	ret = dcmi_formats_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "No supported mediabus format found\n");
		return ret;
	}

	ret = dcmi_framesizes_init(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not initialize framesizes\n");
		return ret;
	}

	ret = dcmi_get_sensor_bounds(dcmi, &dcmi->sd_bounds);
	if (ret) {
		dev_err(dcmi->dev, "Could not get sensor bounds\n");
		return ret;
	}

	ret = dcmi_set_default_fmt(dcmi);
	if (ret) {
		dev_err(dcmi->dev, "Could not set default format\n");
		return ret;
	}

	ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(dcmi->dev, "Failed to register video device\n");
		return ret;
	}

	dev_dbg(dcmi->dev, "Device registered as %s\n",
		video_device_node_name(dcmi->vdev));
	return 0;
}

static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
				     struct v4l2_subdev *sd,
				     struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));

	/* Checks internally if vdev has been init or not */
	video_unregister_device(dcmi->vdev);
}

static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
				   struct v4l2_subdev *subdev,
				   struct v4l2_async_subdev *asd)
{
	struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);

	dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);

	dcmi->entity.subdev = subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
	.bound = dcmi_graph_notify_bound,
	.unbind = dcmi_graph_notify_unbind,
	.complete = dcmi_graph_notify_complete,
};

static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
{
	struct device_node *ep = NULL;
	struct device_node *remote;

	ep = of_graph_get_next_endpoint(node, ep);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_port_parent(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	/* Remote node to connect */
	dcmi->entity.node = remote;
	dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
	dcmi->entity.asd.match.fwnode = of_fwnode_handle(remote);
	return 0;
}

static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
	struct v4l2_async_subdev **subdevs = NULL;
	int ret;

	/* Parse the graph to extract a list of subdevice DT nodes. */
	ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
	if (ret < 0) {
		dev_err(dcmi->dev, "Graph parsing failed\n");
		return ret;
	}

	/* Register the subdevices notifier. */
	subdevs = devm_kzalloc(dcmi->dev, sizeof(*subdevs), GFP_KERNEL);
	if (!subdevs) {
		of_node_put(dcmi->entity.node);
		return -ENOMEM;
	}

	subdevs[0] = &dcmi->entity.asd;

	dcmi->notifier.subdevs = subdevs;
	dcmi->notifier.num_subdevs = 1;
	dcmi->notifier.ops = &dcmi_graph_notify_ops;

	ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
	if (ret < 0) {
		dev_err(dcmi->dev, "Notifier registration failed\n");
		of_node_put(dcmi->entity.node);
		return ret;
	}

	return 0;
}
static int dcmi_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *match = NULL;
	struct v4l2_fwnode_endpoint ep;
	struct stm32_dcmi *dcmi;
	struct vb2_queue *q;
	struct dma_chan *chan;
	struct clk *mclk;
	int irq;
	int ret = 0;

	match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Could not find a match in devicetree\n");
		return -ENODEV;
	}

	dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
	if (!dcmi)
		return -ENOMEM;

	dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(dcmi->rstc)) {
		dev_err(&pdev->dev, "Could not get reset control\n");
		return -ENODEV;
	}

	/* Get bus characteristics from devicetree */
	np = of_graph_get_next_endpoint(np, NULL);
	if (!np) {
		dev_err(&pdev->dev, "Could not find the endpoint\n");
		of_node_put(np);
		return -ENODEV;
	}

	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
	of_node_put(np);
	if (ret) {
		dev_err(&pdev->dev, "Could not parse the endpoint\n");
		return -ENODEV;
	}

	if (ep.bus_type == V4L2_MBUS_CSI2) {
		dev_err(&pdev->dev, "CSI bus not supported\n");
		return -ENODEV;
	}

	dcmi->bus.flags = ep.bus.parallel.flags;
	dcmi->bus.bus_width = ep.bus.parallel.bus_width;
	dcmi->bus.data_shift = ep.bus.parallel.data_shift;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Could not get irq\n");
		return -ENODEV;
	}

	dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!dcmi->res) {
		dev_err(&pdev->dev, "Could not get resource\n");
		return -ENODEV;
	}

	dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
	if (IS_ERR(dcmi->regs)) {
		dev_err(&pdev->dev, "Could not map registers\n");
		return PTR_ERR(dcmi->regs);
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
					dcmi_irq_thread, IRQF_ONESHOT,
					dev_name(&pdev->dev), dcmi);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
		return -ENODEV;
	}

	mclk = devm_clk_get(&pdev->dev, "mclk");
	if (IS_ERR(mclk)) {
		dev_err(&pdev->dev, "Unable to get mclk\n");
		return PTR_ERR(mclk);
	}

	chan = dma_request_slave_channel(&pdev->dev, "tx");
	if (!chan) {
		dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
		return -EPROBE_DEFER;
	}

	spin_lock_init(&dcmi->irqlock);
	mutex_init(&dcmi->lock);
	init_completion(&dcmi->complete);
	INIT_LIST_HEAD(&dcmi->buffers);

	dcmi->dev = &pdev->dev;
	dcmi->mclk = mclk;
	dcmi->state = STOPPED;
	dcmi->dma_chan = chan;

	q = &dcmi->queue;

	/* Initialize the top-level structure */
	ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
	if (ret)
		goto err_dma_release;

	dcmi->vdev = video_device_alloc();
	if (!dcmi->vdev) {
		ret = -ENOMEM;
		goto err_device_unregister;
	}

	/* Video node */
	dcmi->vdev->fops = &dcmi_fops;
	dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
	dcmi->vdev->queue = &dcmi->queue;
	strlcpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
	dcmi->vdev->release = video_device_release;
	dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
	dcmi->vdev->lock = &dcmi->lock;
	dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
				  V4L2_CAP_READWRITE;
	video_set_drvdata(dcmi->vdev, dcmi);

	/* Buffer queue */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
	q->lock = &dcmi->lock;
	q->drv_priv = dcmi;
	q->buf_struct_size = sizeof(struct dcmi_buf);
	q->ops = &dcmi_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;
	q->dev = &pdev->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
		goto err_device_release;
	}

	ret = dcmi_graph_init(dcmi);
	if (ret < 0)
		goto err_device_release;

	/* Reset device */
	ret = reset_control_assert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to assert the reset line\n");
		goto err_device_release;
	}

	usleep_range(3000, 5000);

	ret = reset_control_deassert(dcmi->rstc);
	if (ret) {
		dev_err(&pdev->dev, "Failed to deassert the reset line\n");
		goto err_device_release;
	}

	dev_info(&pdev->dev, "Probe done\n");

	platform_set_drvdata(pdev, dcmi);

	pm_runtime_enable(&pdev->dev);

	return 0;

err_device_release:
	video_device_release(dcmi->vdev);
err_device_unregister:
	v4l2_device_unregister(&dcmi->v4l2_dev);
err_dma_release:
	dma_release_channel(dcmi->dma_chan);

	return ret;
}

static int dcmi_remove(struct platform_device *pdev)
{
	struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	v4l2_async_notifier_unregister(&dcmi->notifier);
	v4l2_device_unregister(&dcmi->v4l2_dev);

	dma_release_channel(dcmi->dma_chan);

	return 0;
}
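
/*
 * Power management: runtime PM only gates the sensor master clock (mclk).
 * Streaming takes a pm_runtime reference in start_streaming and drops it in
 * stop_streaming, while system suspend/resume below force runtime
 * suspend/resume and switch the pinctrl state.
 */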
static __maybe_unused int dcmi_runtime_suspend(struct device *dev)
{
	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);

	clk_disable_unprepare(dcmi->mclk);

	return 0;
}

static __maybe_unused int dcmi_runtime_resume(struct device *dev)
{
	struct stm32_dcmi *dcmi = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dcmi->mclk);
	if (ret)
		dev_err(dev, "%s: Failed to prepare_enable clock\n", __func__);

	return ret;
}

static __maybe_unused int dcmi_suspend(struct device *dev)
{
	/* disable clock */
	pm_runtime_force_suspend(dev);

	/* change pinctrl state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static __maybe_unused int dcmi_resume(struct device *dev)
{
	/* restore pinctrl default state */
	pinctrl_pm_select_default_state(dev);

	/* clock enable */
	pm_runtime_force_resume(dev);

	return 0;
}

static const struct dev_pm_ops dcmi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dcmi_suspend, dcmi_resume)
	SET_RUNTIME_PM_OPS(dcmi_runtime_suspend,
			   dcmi_runtime_resume, NULL)
};

static struct platform_driver stm32_dcmi_driver = {
	.probe		= dcmi_probe,
	.remove		= dcmi_remove,
	.driver		= {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(stm32_dcmi_of_match),
		.pm = &dcmi_pm_ops,
	},
};

module_platform_driver(stm32_dcmi_driver);

MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("video");