/*
 * R-Car Gen3 Digital Radio Interface (DRIF) driver
 *
 * Copyright (C) 2017 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * The R-Car DRIF is a receive-only MSIOF-like controller with an
 * external master device driving the SCK. It receives data into a FIFO,
 * then this driver uses the SYS-DMAC engine to move the data from
 * the device to memory.
 *
 * Each DRIF channel DRIFx (as per the datasheet) contains two internal
 * channels, DRIFx0 & DRIFx1, each with its own resources such as the
 * module clock, register set, irq and dma. These internal channels share
 * the common CLK & SYNC from the master. The two data pins D0 & D1 shall
 * be considered to represent the two internal channels. This internal
 * split is not visible to the master device.
 *
 * Depending on the master device, a DRIF channel can use
 *	(1) both internal channels (D0 & D1) to receive data in parallel (or)
 *	(2) one internal channel (D0 or D1) to receive data
 *
 * The primary design goal of this controller is to act as a Digital Radio
 * Interface that receives digital samples from a tuner device. Hence the
 * driver exposes the device as a V4L2 SDR device. In order to qualify as
 * a V4L2 SDR device, it must possess a tuner interface as mandated by the
 * framework. This driver expects a tuner driver (sub-device) to bind
 * asynchronously with this device, and the combined drivers shall expose
 * a V4L2 compliant SDR device. The DRIF driver is independent of the
 * tuner vendor.
 *
 * The DRIF h/w can support I2S mode and Frame start synchronization pulse
 * mode. This driver is tested for I2S mode only because suitable master
 * devices for the other mode were not available. Hence, not all
 * configurable options of the DRIF h/w (lsb/msb first, syncdl, dtdl etc.)
 * are exposed via DT, and I2S defaults are used. These can be exposed
 * later if needed after testing.
 */
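
/*
 * Illustrative device tree sketch, assembled only from the properties this
 * driver parses below ("renesas,bonding", "renesas,primary-bond",
 * "sync-active", the "fck" clock and the "rx" DMA channel). This is a
 * hedged editorial example of how a bonded pair might be wired up, not an
 * excerpt from the official bindings; node names, labels and all <...>
 * specifiers are placeholders.
 *
 *	drif00: rif@0 {
 *		compatible = "renesas,rcar-gen3-drif";
 *		reg = <...>;
 *		clocks = <...>;
 *		clock-names = "fck";
 *		dmas = <...>;
 *		dma-names = "rx";
 *		renesas,bonding = <&drif01>;
 *		renesas,primary-bond;
 *		port {
 *			drif0_ep: endpoint {
 *				remote-endpoint = <&tuner_ep>;
 *				sync-active = <1>;
 *			};
 *		};
 *	};
 */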
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/ioctl.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

/* DRIF register offsets */
#define RCAR_DRIF_SITMDR1			0x00
#define RCAR_DRIF_SITMDR2			0x04
#define RCAR_DRIF_SITMDR3			0x08
#define RCAR_DRIF_SIRMDR1			0x10
#define RCAR_DRIF_SIRMDR2			0x14
#define RCAR_DRIF_SIRMDR3			0x18
#define RCAR_DRIF_SICTR				0x28
#define RCAR_DRIF_SIFCTR			0x30
#define RCAR_DRIF_SISTR				0x40
#define RCAR_DRIF_SIIER				0x44
#define RCAR_DRIF_SIRFDR			0x60

#define RCAR_DRIF_RFOVF			BIT(3)	/* Receive FIFO overflow */
#define RCAR_DRIF_RFUDF			BIT(4)	/* Receive FIFO underflow */
#define RCAR_DRIF_RFSERR		BIT(5)	/* Receive frame sync error */
#define RCAR_DRIF_REOF			BIT(7)	/* Frame reception end */
#define RCAR_DRIF_RDREQ			BIT(12)	/* Receive data xfer req */
#define RCAR_DRIF_RFFUL			BIT(13)	/* Receive FIFO full */

/* SIRMDR1 */
#define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME		(0 << 28)
#define RCAR_DRIF_SIRMDR1_SYNCMD_LR		(3 << 28)

#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH	(0 << 25)
#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW	(1 << 25)

#define RCAR_DRIF_SIRMDR1_MSB_FIRST		(0 << 24)
#define RCAR_DRIF_SIRMDR1_LSB_FIRST		(1 << 24)

#define RCAR_DRIF_SIRMDR1_DTDL_0		(0 << 20)
#define RCAR_DRIF_SIRMDR1_DTDL_1		(1 << 20)
#define RCAR_DRIF_SIRMDR1_DTDL_2		(2 << 20)
#define RCAR_DRIF_SIRMDR1_DTDL_0PT5		(5 << 20)
#define RCAR_DRIF_SIRMDR1_DTDL_1PT5		(6 << 20)

#define RCAR_DRIF_SIRMDR1_SYNCDL_0		(0 << 20)
#define RCAR_DRIF_SIRMDR1_SYNCDL_1		(1 << 20)
#define RCAR_DRIF_SIRMDR1_SYNCDL_2		(2 << 20)
#define RCAR_DRIF_SIRMDR1_SYNCDL_3		(3 << 20)
#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5		(5 << 20)
#define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5		(6 << 20)

#define RCAR_DRIF_MDR_GRPCNT(n)		(((n) - 1) << 30)
#define RCAR_DRIF_MDR_BITLEN(n)		(((n) - 1) << 24)
#define RCAR_DRIF_MDR_WDCNT(n)		(((n) - 1) << 16)

/* Hidden Transmit register that controls CLK & SYNC */
#define RCAR_DRIF_SITMDR1_PCON		BIT(30)

#define RCAR_DRIF_SICTR_RX_RISING_EDGE	BIT(26)
#define RCAR_DRIF_SICTR_RX_EN		BIT(8)
#define RCAR_DRIF_SICTR_RESET		BIT(0)

/* Constants */
#define RCAR_DRIF_NUM_HWBUFS		32
#define RCAR_DRIF_MAX_DEVS		4
#define RCAR_DRIF_DEFAULT_NUM_HWBUFS	16
#define RCAR_DRIF_DEFAULT_HWBUF_SIZE	(4 * PAGE_SIZE)
#define RCAR_DRIF_MAX_CHANNEL		2
#define RCAR_SDR_BUFFER_SIZE		SZ_64K

/* Internal buffer status flags */
#define RCAR_DRIF_BUF_DONE		BIT(0)	/* DMA completed */
#define RCAR_DRIF_BUF_OVERFLOW		BIT(1)	/* Overflow detected */

#define to_rcar_drif_buf_pair(sdr, ch_num, idx)			\
	(&((sdr)->ch[!(ch_num)]->buf[(idx)]))
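
/*
 * A short worked example of the pairing macro above (an editorial note,
 * not from the original source): the '!' flips the internal channel
 * number, so to_rcar_drif_buf_pair(sdr, 0, idx) evaluates to
 * &sdr->ch[1]->buf[idx], i.e. the partner channel's buffer at the same
 * cyclic DMA stage. This only makes sense with RCAR_DRIF_MAX_CHANNEL == 2.
 */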

#define for_each_rcar_drif_channel(ch, ch_mask)			\
	for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL)

/* Debug */
#define rdrif_dbg(sdr, fmt, arg...)				\
	dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg)

#define rdrif_err(sdr, fmt, arg...)				\
	dev_err(sdr->v4l2_dev.dev, fmt, ## arg)

/* Stream formats */
struct rcar_drif_format {
	u32	pixelformat;
	u32	buffersize;
	u32	bitlen;
	u32	wdcnt;
	u32	num_ch;
};

/* Format descriptions for capture */
static const struct rcar_drif_format formats[] = {
	{
		.pixelformat	= V4L2_SDR_FMT_PCU16BE,
		.buffersize	= RCAR_SDR_BUFFER_SIZE,
		.bitlen		= 16,
		.wdcnt		= 1,
		.num_ch		= 2,
	},
	{
		.pixelformat	= V4L2_SDR_FMT_PCU18BE,
		.buffersize	= RCAR_SDR_BUFFER_SIZE,
		.bitlen		= 18,
		.wdcnt		= 1,
		.num_ch		= 2,
	},
	{
		.pixelformat	= V4L2_SDR_FMT_PCU20BE,
		.buffersize	= RCAR_SDR_BUFFER_SIZE,
		.bitlen		= 20,
		.wdcnt		= 1,
		.num_ch		= 2,
	},
};
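
/*
 * Editorial note: each format above spans both internal channels
 * (num_ch == 2), so rcar_drif_set_default_format() below can only match
 * when both DRIFx0 and DRIFx1 are enabled in DT. The single-channel
 * fallback in rcar_drif_s_fmt_sdr_cap() only comes into play if a
 * num_ch == 1 format is ever added to this table.
 */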

/* Buffer for a received frame from one or both internal channels */
struct rcar_drif_frame_buf {
	/* Common v4l buffer stuff -- must be first */
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

/* OF graph endpoint's V4L2 async data */
struct rcar_drif_graph_ep {
	struct v4l2_subdev *subdev;	/* Async matched subdev */
	struct v4l2_async_subdev asd;	/* Async sub-device descriptor */
};

/* DMA buffer */
struct rcar_drif_hwbuf {
	void *addr;			/* CPU-side address */
	unsigned int status;		/* Buffer status flags */
};

/* Internal channel */
struct rcar_drif {
	struct rcar_drif_sdr *sdr;	/* Group device */
	struct platform_device *pdev;	/* Channel's pdev */
	void __iomem *base;		/* Base register address */
	resource_size_t start;		/* I/O resource offset */
	struct dma_chan *dmach;		/* Reserved DMA channel */
	struct clk *clk;		/* Module clock */
	struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */
	dma_addr_t dma_handle;		/* Handle for all bufs */
	unsigned int num;		/* Channel number */
	bool acting_sdr;		/* Channel acting as SDR device */
};

/* DRIF V4L2 SDR */
struct rcar_drif_sdr {
	struct device *dev;		/* Platform device */
	struct video_device *vdev;	/* V4L2 SDR device */
	struct v4l2_device v4l2_dev;	/* V4L2 device */

	/* Videobuf2 queue and queued buffers list */
	struct vb2_queue vb_queue;
	struct list_head queued_bufs;
	spinlock_t queued_bufs_lock;	/* Protects queued_bufs */
	spinlock_t dma_lock;		/* To serialize DMA cb of channels */

	struct mutex v4l2_mutex;	/* To serialize ioctls */
	struct mutex vb_queue_mutex;	/* To serialize streaming ioctls */
	struct v4l2_ctrl_handler ctrl_hdl;	/* SDR control handler */
	struct v4l2_async_notifier notifier;	/* For subdev (tuner) */
	struct rcar_drif_graph_ep ep;	/* Endpoint V4L2 async data */

	/* Current V4L2 SDR format ptr */
	const struct rcar_drif_format *fmt;

	/* Device tree SYNC properties */
	u32 mdr1;

	/* Internals */
	struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */
	unsigned long hw_ch_mask;	/* Enabled channels per DT */
	unsigned long cur_ch_mask;	/* Used channels for an SDR FMT */
	u32 num_hw_ch;			/* Num of DT enabled channels */
	u32 num_cur_ch;			/* Num of used channels */
	u32 hwbuf_size;			/* Each DMA buffer size */
	u32 produced;			/* Buffers produced by sdr dev */
};

/* Register access functions */
static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data)
{
	writel(data, ch->base + offset);
}

static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset)
{
	return readl(ch->base + offset);
}

/* Release DMA channels */
static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
		if (sdr->ch[i]->dmach) {
			dma_release_channel(sdr->ch[i]->dmach);
			sdr->ch[i]->dmach = NULL;
		}
}

/* Allocate DMA channels */
static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr)
{
	struct dma_slave_config dma_cfg;
	unsigned int i;
	int ret;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		struct rcar_drif *ch = sdr->ch[i];

		ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx");
		if (!ch->dmach) {
			rdrif_err(sdr, "ch%u: dma channel req failed\n", i);
			ret = -ENODEV;
			goto dmach_error;
		}

		/* Configure slave */
		memset(&dma_cfg, 0, sizeof(dma_cfg));
		dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR);
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		ret = dmaengine_slave_config(ch->dmach, &dma_cfg);
		if (ret) {
			rdrif_err(sdr, "ch%u: dma slave config failed\n", i);
			goto dmach_error;
		}
	}
	return 0;

dmach_error:
	rcar_drif_release_dmachannels(sdr);
	return ret;
}

/* Release queued vb2 buffers */
static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr,
					  enum vb2_buffer_state state)
{
	struct rcar_drif_frame_buf *fbuf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
	list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) {
		list_del(&fbuf->list);
		vb2_buffer_done(&fbuf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
}

/* Set MDR defaults */
static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	/* Set defaults for enabled internal channels */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		/* Refer MSIOF section in manual for this register setting */
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1,
				RCAR_DRIF_SITMDR1_PCON);

		/* Setup MDR1 value */
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1);

		rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x",
			  i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1));
	}
}

/* Set DRIF receive format */
static int rcar_drif_set_format(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n",
		  sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch);

	/* Sanity check */
	if (sdr->fmt->num_ch > sdr->num_cur_ch) {
		rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n",
			  sdr->fmt->num_ch, sdr->num_cur_ch);
		return -EINVAL;
	}

	/* Setup group, bitlen & wdcnt */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		u32 mdr;

		/* Two groups */
		mdr = RCAR_DRIF_MDR_GRPCNT(2) |
			RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
			RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr);

		mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
			RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr);

		rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n",
			  i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR2),
			  rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3));
	}
	return 0;
}
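
/*
 * Worked example (an editorial note): for V4L2_SDR_FMT_PCU16BE
 * (bitlen 16, wdcnt 1), SIRMDR2 above is written with
 * RCAR_DRIF_MDR_GRPCNT(2) | RCAR_DRIF_MDR_BITLEN(16) | RCAR_DRIF_MDR_WDCNT(1)
 * = (1 << 30) | (15 << 24) | (0 << 16) = 0x4f000000, and SIRMDR3 with the
 * same value minus the group count, i.e. 0x0f000000.
 */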

/* Release DMA buffers */
static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		struct rcar_drif *ch = sdr->ch[i];

		/* First entry contains the dma buf ptr */
		if (ch->buf[0].addr) {
			dma_free_coherent(&ch->pdev->dev,
				sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
				ch->buf[0].addr, ch->dma_handle);
			ch->buf[0].addr = NULL;
		}
	}
}

/* Request DMA buffers */
static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr)
{
	int ret = -ENOMEM;
	unsigned int i, j;
	void *addr;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		struct rcar_drif *ch = sdr->ch[i];

		/* Allocate DMA buffers */
		addr = dma_alloc_coherent(&ch->pdev->dev,
				sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
				&ch->dma_handle, GFP_KERNEL);
		if (!addr) {
			rdrif_err(sdr,
			"ch%u: dma alloc failed. num hwbufs %u size %u\n",
			i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
			goto error;
		}

		/* Split the chunk and populate bufctxt */
		for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) {
			ch->buf[j].addr = addr + (j * sdr->hwbuf_size);
			ch->buf[j].status = 0;
		}
	}
	return 0;
error:
	return ret;
}

/* Setup vb_queue minimum buffer requirements */
static int rcar_drif_queue_setup(struct vb2_queue *vq,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);

	/* Need at least 16 buffers */
	if (vq->num_buffers + *num_buffers < 16)
		*num_buffers = 16 - vq->num_buffers;

	*num_planes = 1;
	sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize);
	rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]);

	return 0;
}

/* Enqueue buffer */
static void rcar_drif_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue);
	struct rcar_drif_frame_buf *fbuf =
			container_of(vbuf, struct rcar_drif_frame_buf, vb);
	unsigned long flags;

	rdrif_dbg(sdr, "buf_queue idx %u\n", vb->index);
	spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
	list_add_tail(&fbuf->list, &sdr->queued_bufs);
	spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
}

/* Get a frame buf from list */
static struct rcar_drif_frame_buf *
rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr)
{
	struct rcar_drif_frame_buf *fbuf;
	unsigned long flags;

	spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
	fbuf = list_first_entry_or_null(&sdr->queued_bufs,
					struct rcar_drif_frame_buf, list);
	if (!fbuf) {
		/*
		 * The app is late in enqueueing buffers. Samples are lost,
		 * and there will be a gap in the sequence numbers when the
		 * app recovers.
		 */
		rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced);
		spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
		return NULL;
	}
	list_del(&fbuf->list);
	spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);

	return fbuf;
}

/* Helpers to set/clear buf pair status */
static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf)
{
	return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE);
}

static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf)
{
	return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW);
}

static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf,
					unsigned int bit)
{
	unsigned int i;

	for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
		buf[i]->status &= ~bit;
}

/* Channel DMA complete */
static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx)
{
	u32 str;

	ch->buf[idx].status |= RCAR_DRIF_BUF_DONE;

	/* Check for DRIF errors */
	str = rcar_drif_read(ch, RCAR_DRIF_SISTR);
	if (unlikely(str & RCAR_DRIF_RFOVF)) {
		/* Writing the same clears it */
		rcar_drif_write(ch, RCAR_DRIF_SISTR, str);

		/* Overflow: some samples are lost */
		ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW;
	}
}

/* DMA callback for each stage */
static void rcar_drif_dma_complete(void *dma_async_param)
{
	struct rcar_drif *ch = dma_async_param;
	struct rcar_drif_sdr *sdr = ch->sdr;
	struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL];
	struct rcar_drif_frame_buf *fbuf;
	bool overflow = false;
	u32 idx, produced;
	unsigned int i;

	spin_lock(&sdr->dma_lock);

	/* DMA can be terminated while the callback was waiting on lock */
	if (!vb2_is_streaming(&sdr->vb_queue)) {
		spin_unlock(&sdr->dma_lock);
		return;
	}

	idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS;
	rcar_drif_channel_complete(ch, idx);

	if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) {
		buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) :
				&ch->buf[idx];
		buf[1] = ch->num ? &ch->buf[idx] :
				to_rcar_drif_buf_pair(sdr, ch->num, idx);

		/* Check if both DMA buffers are done */
		if (!rcar_drif_bufs_done(buf)) {
			spin_unlock(&sdr->dma_lock);
			return;
		}

		/* Clear buf done status */
		rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE);

		if (rcar_drif_bufs_overflow(buf)) {
			overflow = true;
			/* Clear the flag in status */
			rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW);
		}
	} else {
		buf[0] = &ch->buf[idx];
		if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) {
			overflow = true;
			/* Clear the flag in status */
			buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW;
		}
	}

	/* Buffer produced for consumption */
	produced = sdr->produced++;
	spin_unlock(&sdr->dma_lock);

	rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced);

	/* Get fbuf */
	fbuf = rcar_drif_get_fbuf(sdr);
	if (!fbuf)
		return;
	/*
	 * Copy the samples into the fbuf. Iterate over the channels in use
	 * only; in single channel mode buf[1] is never populated, so looping
	 * up to RCAR_DRIF_MAX_CHANNEL would read an uninitialized pointer.
	 */
	for (i = 0; i < sdr->num_cur_ch; i++)
		memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) +
		       i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size);
	fbuf->vb.field = V4L2_FIELD_NONE;
	fbuf->vb.sequence = produced;
	fbuf->vb.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize);

	/* Set error state on overflow */
	vb2_buffer_done(&fbuf->vb.vb2_buf,
			overflow ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
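
/*
 * Editorial note on the cyclic DMA setup below (derived from this file,
 * not from the datasheet): each channel's contiguous allocation of
 * RCAR_DRIF_NUM_HWBUFS * hwbuf_size bytes is programmed as one cyclic
 * transfer with a period of hwbuf_size, so rcar_drif_dma_complete() fires
 * once per completed stage, and sdr->produced % RCAR_DRIF_NUM_HWBUFS picks
 * the hw buffer that just finished.
 */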
static int rcar_drif_qbuf(struct rcar_drif *ch)
{
	struct rcar_drif_sdr *sdr = ch->sdr;
	dma_addr_t addr = ch->dma_handle;
	struct dma_async_tx_descriptor *rxd;
	dma_cookie_t cookie;
	int ret = -EIO;

	/* Setup cyclic DMA with given buffers */
	rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr,
					sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
					sdr->hwbuf_size, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxd) {
		rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num);
		return ret;
	}

	/* Submit descriptor */
	rxd->callback = rcar_drif_dma_complete;
	rxd->callback_param = ch;
	cookie = dmaengine_submit(rxd);
	if (dma_submit_error(cookie)) {
		rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num);
		return ret;
	}

	dma_async_issue_pending(ch->dmach);
	return 0;
}

/* Enable reception */
static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
{
	unsigned int i;
	u32 ctr;
	int ret = -EINVAL;

	/*
	 * When both internal channels are enabled, they can be synchronized
	 * only by the master
	 */

	/* Enable receive */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
		ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE |
			RCAR_DRIF_SICTR_RX_EN);
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
	}

	/* Check receive enabled */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
				ctr, ctr & RCAR_DRIF_SICTR_RX_EN, 7, 100000);
		if (ret) {
			rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i,
				  rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
			break;
		}
	}
	return ret;
}

/* Disable reception */
static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr)
{
	unsigned int i;
	u32 ctr;
	int ret;

	/* Disable receive */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
		ctr &= ~RCAR_DRIF_SICTR_RX_EN;
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
	}

	/* Check receive disabled */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
				ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN), 7, 100000);
		if (ret)
			dev_warn(&sdr->vdev->dev,
			"ch%u: failed to disable rx. ctr 0x%08x\n",
			i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
	}
}

/* Stop channel */
static void rcar_drif_stop_channel(struct rcar_drif *ch)
{
	/* Disable DMA receive interrupt */
	rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000);

	/* Terminate all DMA transfers */
	dmaengine_terminate_sync(ch->dmach);
}

/* Stop receive operation */
static void rcar_drif_stop(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	/* Disable Rx */
	rcar_drif_disable_rx(sdr);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
		rcar_drif_stop_channel(sdr->ch[i]);
}

/* Start channel */
static int rcar_drif_start_channel(struct rcar_drif *ch)
{
	struct rcar_drif_sdr *sdr = ch->sdr;
	u32 ctr, str;
	int ret;

	/* Reset receive */
	rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET);
	ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr,
				 !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000);
	if (ret) {
		rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n",
			  ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR));
		return ret;
	}

	/* Queue buffers for DMA */
	ret = rcar_drif_qbuf(ch);
	if (ret)
		return ret;

	/* Clear status register flags */
	str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR |
		RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF;
	rcar_drif_write(ch, RCAR_DRIF_SISTR, str);

	/* Enable DMA receive interrupt */
	rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000);

	return ret;
}

/* Start receive operation */
static int rcar_drif_start(struct rcar_drif_sdr *sdr)
{
	unsigned long enabled = 0;
	unsigned int i;
	int ret;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = rcar_drif_start_channel(sdr->ch[i]);
		if (ret)
			goto start_error;
		enabled |= BIT(i);
	}

	ret = rcar_drif_enable_rx(sdr);
	if (ret)
		goto enable_error;

	sdr->produced = 0;
	return ret;

enable_error:
	rcar_drif_disable_rx(sdr);
start_error:
	for_each_rcar_drif_channel(i, &enabled)
		rcar_drif_stop_channel(sdr->ch[i]);

	return ret;
}

/* Start streaming */
static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
	unsigned long enabled = 0;
	unsigned int i;
	int ret;

	mutex_lock(&sdr->v4l2_mutex);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = clk_prepare_enable(sdr->ch[i]->clk);
		if (ret)
			goto error;
		enabled |= BIT(i);
	}

	/* Set default MDRx settings */
	rcar_drif_set_mdr1(sdr);

	/* Set new format */
	ret = rcar_drif_set_format(sdr);
	if (ret)
		goto error;

	if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL)
		sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL;
	else
		sdr->hwbuf_size = sdr->fmt->buffersize;

	rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n",
		  RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
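	/*
	 * Worked example (an editorial note; the numbers follow from the
	 * constants above): with the 64 KiB SDR buffer and both internal
	 * channels in use, hwbuf_size = 64 KiB / 2 = 32 KiB, so each channel
	 * DMAs 32 KiB per cyclic stage and the two halves are concatenated
	 * into one frame buffer in rcar_drif_dma_complete().
	 */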

	/* Alloc DMA channel */
	ret = rcar_drif_alloc_dmachannels(sdr);
	if (ret)
		goto error;

	/* Request buffers */
	ret = rcar_drif_request_buf(sdr);
	if (ret)
		goto error;

	/* Start Rx */
	ret = rcar_drif_start(sdr);
	if (ret)
		goto error;

	mutex_unlock(&sdr->v4l2_mutex);

	return ret;

error:
	rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_QUEUED);
	rcar_drif_release_buf(sdr);
	rcar_drif_release_dmachannels(sdr);
	for_each_rcar_drif_channel(i, &enabled)
		clk_disable_unprepare(sdr->ch[i]->clk);

	mutex_unlock(&sdr->v4l2_mutex);

	return ret;
}

/* Stop streaming */
static void rcar_drif_stop_streaming(struct vb2_queue *vq)
{
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
	unsigned int i;

	mutex_lock(&sdr->v4l2_mutex);

	/* Stop hardware streaming */
	rcar_drif_stop(sdr);

	/* Return all queued buffers to vb2 */
	rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR);

	/* Release buf */
	rcar_drif_release_buf(sdr);

	/* Release DMA channel resources */
	rcar_drif_release_dmachannels(sdr);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
		clk_disable_unprepare(sdr->ch[i]->clk);

	mutex_unlock(&sdr->v4l2_mutex);
}

/* Vb2 ops */
static const struct vb2_ops rcar_drif_vb2_ops = {
	.queue_setup		= rcar_drif_queue_setup,
	.buf_queue		= rcar_drif_buf_queue,
	.start_streaming	= rcar_drif_start_streaming,
	.stop_streaming		= rcar_drif_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

static int rcar_drif_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strlcpy(cap->card, sdr->vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 sdr->vdev->name);

	return 0;
}

static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		/* Matching fmt based on required channels is set as default */
		if (sdr->num_hw_ch == formats[i].num_ch) {
			sdr->fmt = &formats[i];
			sdr->cur_ch_mask = sdr->hw_ch_mask;
			sdr->num_cur_ch = sdr->num_hw_ch;
			dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n",
				i, sdr->cur_ch_mask, sdr->num_cur_ch);
			return 0;
		}
	}
	return -EINVAL;
}

static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].pixelformat;

	return 0;
}

static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
	f->fmt.sdr.buffersize = sdr->fmt->buffersize;

	return 0;
}

static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);
	struct vb2_queue *q = &sdr->vb_queue;
	unsigned int i;

	if (vb2_is_busy(q))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
			break;
	}

	if (i == ARRAY_SIZE(formats))
		i = 0;		/* Set the 1st format as default on no match */

	sdr->fmt = &formats[i];
	f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
	f->fmt.sdr.buffersize = formats[i].buffersize;
	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));

	/*
	 * If a format demands one channel only out of two
	 * enabled channels, pick the 0th channel.
	 */
	if (formats[i].num_ch < sdr->num_hw_ch) {
		sdr->cur_ch_mask = BIT(0);
		sdr->num_cur_ch = formats[i].num_ch;
	} else {
		sdr->cur_ch_mask = sdr->hw_ch_mask;
		sdr->num_cur_ch = sdr->num_hw_ch;
	}

	rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n",
		  i, sdr->cur_ch_mask, sdr->num_cur_ch);

	return 0;
}

static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;
	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));

	return 0;
}

/* Tuner subdev ioctls */
static int rcar_drif_enum_freq_bands(struct file *file, void *priv,
				     struct v4l2_frequency_band *band)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band);
}

static int rcar_drif_g_frequency(struct file *file, void *priv,
				 struct v4l2_frequency *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f);
}

static int rcar_drif_s_frequency(struct file *file, void *priv,
				 const struct v4l2_frequency *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f);
}

static int rcar_drif_g_tuner(struct file *file, void *priv,
			     struct v4l2_tuner *vt)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt);
}

static int rcar_drif_s_tuner(struct file *file, void *priv,
			     const struct v4l2_tuner *vt)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt);
}

static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = {
	.vidioc_querycap	  = rcar_drif_querycap,

	.vidioc_enum_fmt_sdr_cap  = rcar_drif_enum_fmt_sdr_cap,
	.vidioc_g_fmt_sdr_cap	  = rcar_drif_g_fmt_sdr_cap,
	.vidioc_s_fmt_sdr_cap	  = rcar_drif_s_fmt_sdr_cap,
	.vidioc_try_fmt_sdr_cap	  = rcar_drif_try_fmt_sdr_cap,

	.vidioc_reqbufs		  = vb2_ioctl_reqbufs,
	.vidioc_create_bufs	  = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	  = vb2_ioctl_prepare_buf,
	.vidioc_querybuf	  = vb2_ioctl_querybuf,
	.vidioc_qbuf		  = vb2_ioctl_qbuf,
	.vidioc_dqbuf		  = vb2_ioctl_dqbuf,

	.vidioc_streamon	  = vb2_ioctl_streamon,
	.vidioc_streamoff	  = vb2_ioctl_streamoff,

	.vidioc_s_frequency	  = rcar_drif_s_frequency,
	.vidioc_g_frequency	  = rcar_drif_g_frequency,
	.vidioc_s_tuner		  = rcar_drif_s_tuner,
	.vidioc_g_tuner		  = rcar_drif_g_tuner,
	.vidioc_enum_freq_bands	  = rcar_drif_enum_freq_bands,
	.vidioc_subscribe_event	  = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_log_status	  = v4l2_ctrl_log_status,
};

static const struct v4l2_file_operations rcar_drif_fops = {
	.owner			  = THIS_MODULE,
	.open			  = v4l2_fh_open,
	.release		  = vb2_fop_release,
	.read			  = vb2_fop_read,
	.poll			  = vb2_fop_poll,
	.mmap			  = vb2_fop_mmap,
	.unlocked_ioctl		  = video_ioctl2,
};

static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr)
{
	int ret;

	/* Init video_device structure */
	sdr->vdev = video_device_alloc();
	if (!sdr->vdev)
		return -ENOMEM;

	snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF");
	sdr->vdev->fops = &rcar_drif_fops;
	sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops;
	sdr->vdev->release = video_device_release;
	sdr->vdev->lock = &sdr->v4l2_mutex;
	sdr->vdev->queue = &sdr->vb_queue;
	sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
	sdr->vdev->ctrl_handler = &sdr->ctrl_hdl;
	sdr->vdev->v4l2_dev = &sdr->v4l2_dev;
	sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
		V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
	video_set_drvdata(sdr->vdev, sdr);

	/* Register V4L2 SDR device */
	ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1);
	if (ret) {
		video_device_release(sdr->vdev);
		sdr->vdev = NULL;
		dev_err(sdr->dev, "failed video_register_device (%d)\n", ret);
	}

	return ret;
}

static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
{
	video_unregister_device(sdr->vdev);
	sdr->vdev = NULL;
}

/* Sub-device bound callback */
static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *subdev,
				  struct v4l2_async_subdev *asd)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);

	if (sdr->ep.asd.match.fwnode !=
	    of_fwnode_handle(subdev->dev->of_node)) {
		rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
		return -EINVAL;
	}

	v4l2_set_subdev_hostdata(subdev, sdr);
	sdr->ep.subdev = subdev;
	rdrif_dbg(sdr, "bound asd %s\n", subdev->name);

	return 0;
}

/* Sub-device unbind callback */
static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);

	if (sdr->ep.subdev != subdev) {
		rdrif_err(sdr, "subdev %s is not bound\n", subdev->name);
		return;
	}

	/* Free ctrl handler if initialized */
	v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
	sdr->v4l2_dev.ctrl_handler = NULL;
	sdr->ep.subdev = NULL;

	rcar_drif_sdr_unregister(sdr);
	rdrif_dbg(sdr, "unbind asd %s\n", subdev->name);
}

/* Sub-device registered notification callback */
static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);
	int ret;

	/*
	 * The subdev tested at this point uses 4 controls. Using 10 as a
	 * worst-case hint: when fewer controls are needed, some memory goes
	 * unused, and when more are needed the framework uses a hash to
	 * manage controls beyond this number.
	 */
	ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10);
	if (ret)
		return -ENOMEM;

	sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl;
	ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev);
	if (ret) {
		rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret);
		goto error;
	}

	ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
				    sdr->ep.subdev->ctrl_handler, NULL);
	if (ret) {
		rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
		goto error;
	}

	ret = rcar_drif_sdr_register(sdr);
	if (ret)
		goto error;

	return ret;

error:
	v4l2_ctrl_handler_free(&sdr->ctrl_hdl);

	return ret;
}

static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
	.bound = rcar_drif_notify_bound,
	.unbind = rcar_drif_notify_unbind,
	.complete = rcar_drif_notify_complete,
};

/* Read endpoint properties */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
					struct fwnode_handle *fwnode)
{
	u32 val;

	/* Set the I2S defaults for SIRMDR1 */
	sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST |
		RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0;

	/* Parse sync polarity from endpoint */
	if (!fwnode_property_read_u32(fwnode, "sync-active", &val))
		sdr->mdr1 |= val ? RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH :
			RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW;
	else
		sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */

	dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1);
}
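
/*
 * Worked example (an editorial note): with no "sync-active" endpoint
 * property, the defaults above combine to
 * RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28) | RCAR_DRIF_SIRMDR1_MSB_FIRST (0) |
 * RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20) | RCAR_DRIF_SIRMDR1_SYNCDL_0 (0) |
 * RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0), i.e. mdr1 == 0x30100000.
 */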

/* Parse sub-devs (tuner) to find a matching device */
static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
{
	struct v4l2_async_notifier *notifier = &sdr->notifier;
	struct fwnode_handle *fwnode, *ep;

	notifier->subdevs = devm_kzalloc(sdr->dev, sizeof(*notifier->subdevs),
					 GFP_KERNEL);
	if (!notifier->subdevs)
		return -ENOMEM;

	ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
					    NULL);
	if (!ep)
		return 0;

	notifier->subdevs[notifier->num_subdevs] = &sdr->ep.asd;
	fwnode = fwnode_graph_get_remote_port_parent(ep);
	if (!fwnode) {
		dev_warn(sdr->dev, "bad remote port parent\n");
		fwnode_handle_put(ep);
		return -EINVAL;
	}

	sdr->ep.asd.match.fwnode = fwnode;
	sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
	notifier->num_subdevs++;

	/* Get the endpoint properties */
	rcar_drif_get_ep_properties(sdr, ep);

	fwnode_handle_put(fwnode);
	fwnode_handle_put(ep);

	return 0;
}

/* Check if the given device is the primary bond */
static bool rcar_drif_primary_bond(struct platform_device *pdev)
{
	return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond");
}

/* Check if both devices of the bond are enabled */
static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
{
	struct device_node *np;

	np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
	if (np && of_device_is_available(np))
		return np;

	return NULL;
}

/* Check if the bonded device is probed */
static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr,
				    struct device_node *np)
{
	struct platform_device *pdev;
	struct rcar_drif *ch;
	int ret = 0;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(sdr->dev, "failed to get bonded device from node\n");
		return -ENODEV;
	}

	device_lock(&pdev->dev);
	ch = platform_get_drvdata(pdev);
	if (ch) {
		/* Update sdr data in the bonded device */
		ch->sdr = sdr;

		/* Update sdr with bonded device data */
		sdr->ch[ch->num] = ch;
		sdr->hw_ch_mask |= BIT(ch->num);
	} else {
		/* Defer */
		dev_info(sdr->dev, "defer probe\n");
		ret = -EPROBE_DEFER;
	}
	device_unlock(&pdev->dev);

	put_device(&pdev->dev);

	return ret;
}
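
/*
 * Editorial note on the bonding flow implemented above and in
 * rcar_drif_probe() below: the non-primary channel's probe stores its
 * drvdata and returns early with ch->num = 1, while the channel carrying
 * "renesas,primary-bond" allocates the shared SDR instance and looks the
 * partner up through the "renesas,bonding" phandle, deferring with
 * -EPROBE_DEFER until the partner has probed.
 */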

/* V4L2 SDR device probe */
static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
{
	int ret;

	/* Validate any supported format for enabled channels */
	ret = rcar_drif_set_default_format(sdr);
	if (ret) {
		dev_err(sdr->dev, "failed to set default format\n");
		return ret;
	}

	/* Set defaults */
	sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE;

	mutex_init(&sdr->v4l2_mutex);
	mutex_init(&sdr->vb_queue_mutex);
	spin_lock_init(&sdr->queued_bufs_lock);
	spin_lock_init(&sdr->dma_lock);
	INIT_LIST_HEAD(&sdr->queued_bufs);

	/* Init videobuf2 queue structure */
	sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
	sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
	sdr->vb_queue.drv_priv = sdr;
	sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf);
	sdr->vb_queue.ops = &rcar_drif_vb2_ops;
	sdr->vb_queue.mem_ops = &vb2_vmalloc_memops;
	sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	/* Init videobuf2 queue */
	ret = vb2_queue_init(&sdr->vb_queue);
	if (ret) {
		dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret);
		return ret;
	}

	/* Register the v4l2_device */
	ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev);
	if (ret) {
		dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret);
		return ret;
	}

	/*
	 * Parse subdevs after v4l2_device_register because if the subdev
	 * is already probed, bound and complete will be called immediately
	 */
	ret = rcar_drif_parse_subdevs(sdr);
	if (ret)
		goto error;

	sdr->notifier.ops = &rcar_drif_notify_ops;

	/* Register notifier */
	ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
	if (ret < 0) {
		dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
		goto error;
	}

	return ret;

error:
	v4l2_device_unregister(&sdr->v4l2_dev);

	return ret;
}

/* V4L2 SDR device remove */
static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
{
	v4l2_async_notifier_unregister(&sdr->notifier);
	v4l2_device_unregister(&sdr->v4l2_dev);
}

/* DRIF channel probe */
static int rcar_drif_probe(struct platform_device *pdev)
{
	struct rcar_drif_sdr *sdr;
	struct device_node *np;
	struct rcar_drif *ch;
	struct resource *res;
	int ret;

	/* Reserve memory for enabled channel */
	ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return -ENOMEM;

	ch->pdev = pdev;

	/* Module clock */
	ch->clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(ch->clk)) {
		ret = PTR_ERR(ch->clk);
		dev_err(&pdev->dev, "clk get failed (%d)\n", ret);
		return ret;
	}

	/* Register map */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ch->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ch->base)) {
		ret = PTR_ERR(ch->base);
		dev_err(&pdev->dev, "ioremap failed (%d)\n", ret);
		return ret;
	}
	ch->start = res->start;
	platform_set_drvdata(pdev, ch);

	/* Check if both channels of the bond are enabled */
	np = rcar_drif_bond_enabled(pdev);
	if (np) {
		/* Check if current channel acting as primary-bond */
		if (!rcar_drif_primary_bond(pdev)) {
			ch->num = 1;	/* Primary bond is channel 0 always */
			of_node_put(np);
			return 0;
		}
	}

	/* Reserve memory for SDR structure */
	sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL);
	if (!sdr) {
		of_node_put(np);
		return -ENOMEM;
	}
	ch->sdr = sdr;
	sdr->dev = &pdev->dev;

	/* Establish links between SDR and channel(s) */
	sdr->ch[ch->num] = ch;
	sdr->hw_ch_mask = BIT(ch->num);
	if (np) {
		/* Check if bonded device is ready */
		ret = rcar_drif_bond_available(sdr, np);
		of_node_put(np);
		if (ret)
			return ret;
	}
	sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask);

	return rcar_drif_sdr_probe(sdr);
}

/* DRIF channel remove */
static int rcar_drif_remove(struct platform_device *pdev)
{
	struct rcar_drif *ch = platform_get_drvdata(pdev);
	struct rcar_drif_sdr *sdr = ch->sdr;

	/* Channel 0 will be the SDR instance */
	if (ch->num)
		return 0;

	/* SDR instance */
	rcar_drif_sdr_remove(sdr);

	return 0;
}

/* FIXME: Implement suspend/resume support */
static int __maybe_unused rcar_drif_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused rcar_drif_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
			 rcar_drif_resume);

static const struct of_device_id rcar_drif_of_table[] = {
	{ .compatible = "renesas,rcar-gen3-drif" },
	{ }
};
MODULE_DEVICE_TABLE(of, rcar_drif_of_table);

#define RCAR_DRIF_DRV_NAME "rcar_drif"
static struct platform_driver rcar_drif_driver = {
	.driver = {
		.name = RCAR_DRIF_DRV_NAME,
		.of_match_table = of_match_ptr(rcar_drif_of_table),
		.pm = &rcar_drif_pm_ops,
	},
	.probe = rcar_drif_probe,
	.remove = rcar_drif_remove,
};

module_platform_driver(rcar_drif_driver);

MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>");