rcar_fdp1.c 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445
  1. /*
  2. * Renesas RCar Fine Display Processor
  3. *
  4. * Video format converter and frame deinterlacer device.
  5. *
  6. * Author: Kieran Bingham, <kieran@bingham.xyz>
  7. * Copyright (c) 2016 Renesas Electronics Corporation.
  8. *
  9. * This code is developed and inspired from the vim2m, rcar_jpu,
  10. * m2m-deinterlace, and vsp1 drivers.
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by the
  14. * Free Software Foundation; either version 2 of the
  15. * License, or (at your option) any later version
  16. */
  17. #include <linux/clk.h>
  18. #include <linux/delay.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/fs.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/module.h>
  23. #include <linux/of.h>
  24. #include <linux/of_device.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/pm_runtime.h>
  27. #include <linux/sched.h>
  28. #include <linux/slab.h>
  29. #include <linux/timer.h>
  30. #include <media/rcar-fcp.h>
  31. #include <media/v4l2-ctrls.h>
  32. #include <media/v4l2-device.h>
  33. #include <media/v4l2-event.h>
  34. #include <media/v4l2-ioctl.h>
  35. #include <media/v4l2-mem2mem.h>
  36. #include <media/videobuf2-dma-contig.h>
  37. static unsigned int debug;
  38. module_param(debug, uint, 0644);
  39. MODULE_PARM_DESC(debug, "activate debug info");
  40. /* Minimum and maximum frame width/height */
  41. #define FDP1_MIN_W 80U
  42. #define FDP1_MIN_H 80U
  43. #define FDP1_MAX_W 3840U
  44. #define FDP1_MAX_H 2160U
  45. #define FDP1_MAX_PLANES 3U
  46. #define FDP1_MAX_STRIDE 8190U
  47. /* Flags that indicate a format can be used for capture/output */
  48. #define FDP1_CAPTURE BIT(0)
  49. #define FDP1_OUTPUT BIT(1)
  50. #define DRIVER_NAME "rcar_fdp1"
  51. /* Number of Job's to have available on the processing queue */
  52. #define FDP1_NUMBER_JOBS 8
  53. #define dprintk(fdp1, fmt, arg...) \
  54. v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
  55. /*
  56. * FDP1 registers and bits
  57. */
  58. /* FDP1 start register - Imm */
  59. #define FD1_CTL_CMD 0x0000
  60. #define FD1_CTL_CMD_STRCMD BIT(0)
  61. /* Sync generator register - Imm */
  62. #define FD1_CTL_SGCMD 0x0004
  63. #define FD1_CTL_SGCMD_SGEN BIT(0)
  64. /* Register set end register - Imm */
  65. #define FD1_CTL_REGEND 0x0008
  66. #define FD1_CTL_REGEND_REGEND BIT(0)
  67. /* Channel activation register - Vupdt */
  68. #define FD1_CTL_CHACT 0x000c
  69. #define FD1_CTL_CHACT_SMW BIT(9)
  70. #define FD1_CTL_CHACT_WR BIT(8)
  71. #define FD1_CTL_CHACT_SMR BIT(3)
  72. #define FD1_CTL_CHACT_RD2 BIT(2)
  73. #define FD1_CTL_CHACT_RD1 BIT(1)
  74. #define FD1_CTL_CHACT_RD0 BIT(0)
  75. /* Operation Mode Register - Vupdt */
  76. #define FD1_CTL_OPMODE 0x0010
  77. #define FD1_CTL_OPMODE_PRG BIT(4)
  78. #define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
  79. #define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
  80. #define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
  81. #define FD1_CTL_VPERIOD 0x0014
  82. #define FD1_CTL_CLKCTRL 0x0018
  83. #define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
  84. /* Software reset register */
  85. #define FD1_CTL_SRESET 0x001c
  86. #define FD1_CTL_SRESET_SRST BIT(0)
  87. /* Control status register (V-update-status) */
  88. #define FD1_CTL_STATUS 0x0024
  89. #define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
  90. #define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
  91. #define FD1_CTL_STATUS_SGREGSET BIT(10)
  92. #define FD1_CTL_STATUS_SGVERR BIT(9)
  93. #define FD1_CTL_STATUS_SGFREND BIT(8)
  94. #define FD1_CTL_STATUS_BSY BIT(0)
  95. #define FD1_CTL_VCYCLE_STAT 0x0028
  96. /* Interrupt enable register */
  97. #define FD1_CTL_IRQENB 0x0038
  98. /* Interrupt status register */
  99. #define FD1_CTL_IRQSTA 0x003c
  100. /* Interrupt control register */
  101. #define FD1_CTL_IRQFSET 0x0040
  102. /* Common IRQ Bit settings */
  103. #define FD1_CTL_IRQ_VERE BIT(16)
  104. #define FD1_CTL_IRQ_VINTE BIT(4)
  105. #define FD1_CTL_IRQ_FREE BIT(0)
  106. #define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
  107. FD1_CTL_IRQ_VINTE | \
  108. FD1_CTL_IRQ_FREE)
  109. /* RPF */
  110. #define FD1_RPF_SIZE 0x0060
  111. #define FD1_RPF_SIZE_MASK GENMASK(12, 0)
  112. #define FD1_RPF_SIZE_H_SHIFT 16
  113. #define FD1_RPF_SIZE_V_SHIFT 0
  114. #define FD1_RPF_FORMAT 0x0064
  115. #define FD1_RPF_FORMAT_CIPM BIT(16)
  116. #define FD1_RPF_FORMAT_RSPYCS BIT(13)
  117. #define FD1_RPF_FORMAT_RSPUVS BIT(12)
  118. #define FD1_RPF_FORMAT_CF BIT(8)
  119. #define FD1_RPF_PSTRIDE 0x0068
  120. #define FD1_RPF_PSTRIDE_Y_SHIFT 16
  121. #define FD1_RPF_PSTRIDE_C_SHIFT 0
  122. /* RPF0 Source Component Y Address register */
  123. #define FD1_RPF0_ADDR_Y 0x006c
  124. /* RPF1 Current Picture Registers */
  125. #define FD1_RPF1_ADDR_Y 0x0078
  126. #define FD1_RPF1_ADDR_C0 0x007c
  127. #define FD1_RPF1_ADDR_C1 0x0080
  128. /* RPF2 next picture register */
  129. #define FD1_RPF2_ADDR_Y 0x0084
  130. #define FD1_RPF_SMSK_ADDR 0x0090
  131. #define FD1_RPF_SWAP 0x0094
  132. /* WPF */
  133. #define FD1_WPF_FORMAT 0x00c0
  134. #define FD1_WPF_FORMAT_PDV_SHIFT 24
  135. #define FD1_WPF_FORMAT_FCNL BIT(20)
  136. #define FD1_WPF_FORMAT_WSPYCS BIT(15)
  137. #define FD1_WPF_FORMAT_WSPUVS BIT(14)
  138. #define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
  139. #define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
  140. #define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
  141. #define FD1_WPF_FORMAT_CSC BIT(8)
  142. #define FD1_WPF_RNDCTL 0x00c4
  143. #define FD1_WPF_RNDCTL_CBRM BIT(28)
  144. #define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
  145. #define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
  146. #define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
  147. #define FD1_WPF_PSTRIDE 0x00c8
  148. #define FD1_WPF_PSTRIDE_Y_SHIFT 16
  149. #define FD1_WPF_PSTRIDE_C_SHIFT 0
  150. /* WPF Destination picture */
  151. #define FD1_WPF_ADDR_Y 0x00cc
  152. #define FD1_WPF_ADDR_C0 0x00d0
  153. #define FD1_WPF_ADDR_C1 0x00d4
  154. #define FD1_WPF_SWAP 0x00d8
  155. #define FD1_WPF_SWAP_OSWAP_SHIFT 0
  156. #define FD1_WPF_SWAP_SSWAP_SHIFT 4
  157. /* WPF/RPF Common */
  158. #define FD1_RWPF_SWAP_BYTE BIT(0)
  159. #define FD1_RWPF_SWAP_WORD BIT(1)
  160. #define FD1_RWPF_SWAP_LWRD BIT(2)
  161. #define FD1_RWPF_SWAP_LLWD BIT(3)
  162. /* IPC */
  163. #define FD1_IPC_MODE 0x0100
  164. #define FD1_IPC_MODE_DLI BIT(8)
  165. #define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
  166. #define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
  167. #define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
  168. #define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
  169. #define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
  170. #define FD1_IPC_SMSK_THRESH 0x0104
  171. #define FD1_IPC_SMSK_THRESH_CONST 0x00010002
  172. #define FD1_IPC_COMB_DET 0x0108
  173. #define FD1_IPC_COMB_DET_CONST 0x00200040
  174. #define FD1_IPC_MOTDEC 0x010c
  175. #define FD1_IPC_MOTDEC_CONST 0x00008020
  176. /* DLI registers */
  177. #define FD1_IPC_DLI_BLEND 0x0120
  178. #define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
  179. #define FD1_IPC_DLI_HGAIN 0x0124
  180. #define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
  181. #define FD1_IPC_DLI_SPRS 0x0128
  182. #define FD1_IPC_DLI_SPRS_CONST 0x009004ff
  183. #define FD1_IPC_DLI_ANGLE 0x012c
  184. #define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
  185. #define FD1_IPC_DLI_ISOPIX0 0x0130
  186. #define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
  187. #define FD1_IPC_DLI_ISOPIX1 0x0134
  188. #define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
  189. /* Sensor registers */
  190. #define FD1_IPC_SENSOR_TH0 0x0140
  191. #define FD1_IPC_SENSOR_TH0_CONST 0x20208080
  192. #define FD1_IPC_SENSOR_TH1 0x0144
  193. #define FD1_IPC_SENSOR_TH1_CONST 0
  194. #define FD1_IPC_SENSOR_CTL0 0x0170
  195. #define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
  196. #define FD1_IPC_SENSOR_CTL1 0x0174
  197. #define FD1_IPC_SENSOR_CTL1_CONST 0
  198. #define FD1_IPC_SENSOR_CTL2 0x0178
  199. #define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
  200. #define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
  201. #define FD1_IPC_SENSOR_CTL3 0x017c
  202. #define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
  203. #define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
  204. /* Line memory pixel number register */
  205. #define FD1_IPC_LMEM 0x01e0
  206. #define FD1_IPC_LMEM_LINEAR 1024
  207. #define FD1_IPC_LMEM_TILE 960
  208. /* Internal Data (HW Version) */
  209. #define FD1_IP_INTDATA 0x0800
  210. #define FD1_IP_H3 0x02010101
  211. #define FD1_IP_M3W 0x02010202
  212. /* LUTs */
  213. #define FD1_LUT_DIF_ADJ 0x1000
  214. #define FD1_LUT_SAD_ADJ 0x1400
  215. #define FD1_LUT_BLD_GAIN 0x1800
  216. #define FD1_LUT_DIF_GAIN 0x1c00
  217. #define FD1_LUT_MDET 0x2000
  218. /**
  219. * struct fdp1_fmt - The FDP1 internal format data
  220. * @fourcc: the fourcc code, to match the V4L2 API
  221. * @bpp: bits per pixel per plane
  222. * @num_planes: number of planes
  223. * @hsub: horizontal subsampling factor
  224. * @vsub: vertical subsampling factor
  225. * @fmt: 7-bit format code for the fdp1 hardware
  226. * @swap_yc: the Y and C components are swapped (Y comes before C)
  227. * @swap_uv: the U and V components are swapped (V comes before U)
  228. * @swap: swap register control
  229. * @types: types of queue this format is applicable to
  230. */
  231. struct fdp1_fmt {
  232. u32 fourcc;
  233. u8 bpp[3];
  234. u8 num_planes;
  235. u8 hsub;
  236. u8 vsub;
  237. u8 fmt;
  238. bool swap_yc;
  239. bool swap_uv;
  240. u8 swap;
  241. u8 types;
  242. };
  243. static const struct fdp1_fmt fdp1_formats[] = {
  244. /* RGB formats are only supported by the Write Pixel Formatter */
  245. { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
  246. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  247. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  248. FDP1_CAPTURE },
  249. { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
  250. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  251. FD1_RWPF_SWAP_WORD,
  252. FDP1_CAPTURE },
  253. { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
  254. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  255. FD1_RWPF_SWAP_WORD,
  256. FDP1_CAPTURE },
  257. { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
  258. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  259. FD1_RWPF_SWAP_WORD,
  260. FDP1_CAPTURE },
  261. { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  262. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
  263. FDP1_CAPTURE },
  264. { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  265. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
  266. FDP1_CAPTURE },
  267. { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  268. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  269. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  270. FDP1_CAPTURE },
  271. { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  272. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  273. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  274. FDP1_CAPTURE },
  275. { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
  276. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  277. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  278. FDP1_CAPTURE },
  279. { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
  280. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  281. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  282. FDP1_CAPTURE },
  283. { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
  284. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  285. FD1_RWPF_SWAP_WORD,
  286. FDP1_CAPTURE },
  287. { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
  288. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  289. FD1_RWPF_SWAP_WORD,
  290. FDP1_CAPTURE },
  291. /* YUV Formats are supported by Read and Write Pixel Formatters */
  292. { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
  293. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  294. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  295. FDP1_CAPTURE | FDP1_OUTPUT },
  296. { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
  297. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  298. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  299. FDP1_CAPTURE | FDP1_OUTPUT },
  300. { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
  301. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  302. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  303. FDP1_CAPTURE | FDP1_OUTPUT },
  304. { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
  305. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  306. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  307. FDP1_CAPTURE | FDP1_OUTPUT },
  308. { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
  309. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  310. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  311. FDP1_CAPTURE | FDP1_OUTPUT },
  312. { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
  313. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  314. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  315. FDP1_CAPTURE | FDP1_OUTPUT },
  316. { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
  317. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  318. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  319. FDP1_CAPTURE | FDP1_OUTPUT },
  320. { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
  321. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  322. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  323. FDP1_CAPTURE | FDP1_OUTPUT },
  324. { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
  325. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  326. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  327. FDP1_CAPTURE | FDP1_OUTPUT },
  328. { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
  329. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  330. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  331. FDP1_CAPTURE | FDP1_OUTPUT },
  332. { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
  333. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  334. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  335. FDP1_CAPTURE | FDP1_OUTPUT },
  336. { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
  337. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  338. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  339. FDP1_CAPTURE | FDP1_OUTPUT },
  340. { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
  341. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  342. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  343. FDP1_CAPTURE | FDP1_OUTPUT },
  344. { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
  345. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  346. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  347. FDP1_CAPTURE | FDP1_OUTPUT },
  348. };
  349. static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
  350. {
  351. return fmt->fmt <= 0x1b; /* Last RGB code */
  352. }
  353. /*
  354. * FDP1 Lookup tables range from 0...255 only
  355. *
  356. * Each table must be less than 256 entries, and all tables
  357. * are padded out to 256 entries by duplicating the last value.
  358. */
  359. static const u8 fdp1_diff_adj[] = {
  360. 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
  361. 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
  362. 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
  363. };
  364. static const u8 fdp1_sad_adj[] = {
  365. 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
  366. 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
  367. 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
  368. };
  369. static const u8 fdp1_bld_gain[] = {
  370. 0x80,
  371. };
  372. static const u8 fdp1_dif_gain[] = {
  373. 0x80,
  374. };
  375. static const u8 fdp1_mdet[] = {
  376. 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
  377. 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
  378. 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
  379. 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
  380. 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
  381. 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
  382. 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
  383. 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
  384. 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
  385. 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
  386. 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
  387. 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
  388. 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
  389. 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
  390. 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
  391. 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
  392. 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
  393. 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
  394. 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
  395. 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
  396. 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
  397. 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
  398. 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
  399. 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
  400. 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
  401. 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
  402. 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
  403. 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
  404. 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
  405. 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
  406. 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
  407. 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
  408. };
  409. /* Per-queue, driver-specific private data */
  410. struct fdp1_q_data {
  411. const struct fdp1_fmt *fmt;
  412. struct v4l2_pix_format_mplane format;
  413. unsigned int vsize;
  414. unsigned int stride_y;
  415. unsigned int stride_c;
  416. };
  417. static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
  418. {
  419. const struct fdp1_fmt *fmt;
  420. unsigned int i;
  421. for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
  422. fmt = &fdp1_formats[i];
  423. if (fmt->fourcc == pixelformat)
  424. return fmt;
  425. }
  426. return NULL;
  427. }
  428. enum fdp1_deint_mode {
  429. FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
  430. FDP1_ADAPT2D3D,
  431. FDP1_FIXED2D,
  432. FDP1_FIXED3D,
  433. FDP1_PREVFIELD,
  434. FDP1_NEXTFIELD,
  435. };
  436. #define FDP1_DEINT_MODE_USES_NEXT(mode) \
  437. (mode == FDP1_ADAPT2D3D || \
  438. mode == FDP1_FIXED3D || \
  439. mode == FDP1_NEXTFIELD)
  440. #define FDP1_DEINT_MODE_USES_PREV(mode) \
  441. (mode == FDP1_ADAPT2D3D || \
  442. mode == FDP1_FIXED3D || \
  443. mode == FDP1_PREVFIELD)
  444. /*
  445. * FDP1 operates on potentially 3 fields, which are tracked
  446. * from the VB buffers using this context structure.
  447. * Will always be a field or a full frame, never two fields.
  448. */
  449. struct fdp1_field_buffer {
  450. struct vb2_v4l2_buffer *vb;
  451. dma_addr_t addrs[3];
  452. /* Should be NONE:TOP:BOTTOM only */
  453. enum v4l2_field field;
  454. /* Flag to indicate this is the last field in the vb */
  455. bool last_field;
  456. /* Buffer queue lists */
  457. struct list_head list;
  458. };
  459. struct fdp1_buffer {
  460. struct v4l2_m2m_buffer m2m_buf;
  461. struct fdp1_field_buffer fields[2];
  462. unsigned int num_fields;
  463. };
  464. static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
  465. {
  466. return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
  467. }
  468. struct fdp1_job {
  469. struct fdp1_field_buffer *previous;
  470. struct fdp1_field_buffer *active;
  471. struct fdp1_field_buffer *next;
  472. struct fdp1_field_buffer *dst;
  473. /* A job can only be on one list at a time */
  474. struct list_head list;
  475. };
  476. struct fdp1_dev {
  477. struct v4l2_device v4l2_dev;
  478. struct video_device vfd;
  479. struct mutex dev_mutex;
  480. spinlock_t irqlock;
  481. spinlock_t device_process_lock;
  482. void __iomem *regs;
  483. unsigned int irq;
  484. struct device *dev;
  485. /* Job Queues */
  486. struct fdp1_job jobs[FDP1_NUMBER_JOBS];
  487. struct list_head free_job_list;
  488. struct list_head queued_job_list;
  489. struct list_head hw_job_list;
  490. unsigned int clk_rate;
  491. struct rcar_fcp_device *fcp;
  492. struct v4l2_m2m_dev *m2m_dev;
  493. };
  494. struct fdp1_ctx {
  495. struct v4l2_fh fh;
  496. struct fdp1_dev *fdp1;
  497. struct v4l2_ctrl_handler hdl;
  498. unsigned int sequence;
  499. /* Processed buffers in this transaction */
  500. u8 num_processed;
  501. /* Transaction length (i.e. how many buffers per transaction) */
  502. u32 translen;
  503. /* Abort requested by m2m */
  504. int aborting;
  505. /* Deinterlace processing mode */
  506. enum fdp1_deint_mode deint_mode;
  507. /*
  508. * Adaptive 2D/3D mode uses a shared mask
  509. * This is allocated at streamon, if the ADAPT2D3D mode
  510. * is requested
  511. */
  512. unsigned int smsk_size;
  513. dma_addr_t smsk_addr[2];
  514. void *smsk_cpu;
  515. /* Capture pipeline, can specify an alpha value
  516. * for supported formats. 0-255 only
  517. */
  518. unsigned char alpha;
  519. /* Source and destination queue data */
  520. struct fdp1_q_data out_q; /* HW Source */
  521. struct fdp1_q_data cap_q; /* HW Destination */
  522. /*
  523. * Field Queues
  524. * Interlaced fields are used on 3 occasions, and tracked in this list.
  525. *
  526. * V4L2 Buffers are tracked inside the fdp1_buffer
  527. * and released when the last 'field' completes
  528. */
  529. struct list_head fields_queue;
  530. unsigned int buffers_queued;
  531. /*
  532. * For de-interlacing we need to track our previous buffer
  533. * while preparing our job lists.
  534. */
  535. struct fdp1_field_buffer *previous;
  536. };
  537. static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
  538. {
  539. return container_of(fh, struct fdp1_ctx, fh);
  540. }
  541. static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
  542. enum v4l2_buf_type type)
  543. {
  544. if (V4L2_TYPE_IS_OUTPUT(type))
  545. return &ctx->out_q;
  546. else
  547. return &ctx->cap_q;
  548. }
  549. /*
  550. * list_remove_job: Take the first item off the specified job list
  551. *
  552. * Returns: pointer to a job, or NULL if the list is empty.
  553. */
  554. static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
  555. struct list_head *list)
  556. {
  557. struct fdp1_job *job;
  558. unsigned long flags;
  559. spin_lock_irqsave(&fdp1->irqlock, flags);
  560. job = list_first_entry_or_null(list, struct fdp1_job, list);
  561. if (job)
  562. list_del(&job->list);
  563. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  564. return job;
  565. }
  566. /*
  567. * list_add_job: Add a job to the specified job list
  568. *
  569. * Returns: void - always succeeds
  570. */
  571. static void list_add_job(struct fdp1_dev *fdp1,
  572. struct list_head *list,
  573. struct fdp1_job *job)
  574. {
  575. unsigned long flags;
  576. spin_lock_irqsave(&fdp1->irqlock, flags);
  577. list_add_tail(&job->list, list);
  578. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  579. }
  580. static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
  581. {
  582. return list_remove_job(fdp1, &fdp1->free_job_list);
  583. }
  584. static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
  585. {
  586. /* Ensure that all residue from previous jobs is gone */
  587. memset(job, 0, sizeof(struct fdp1_job));
  588. list_add_job(fdp1, &fdp1->free_job_list, job);
  589. }
  590. static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
  591. {
  592. list_add_job(fdp1, &fdp1->queued_job_list, job);
  593. }
  594. static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
  595. {
  596. return list_remove_job(fdp1, &fdp1->queued_job_list);
  597. }
  598. static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
  599. {
  600. list_add_job(fdp1, &fdp1->hw_job_list, job);
  601. }
  602. static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
  603. {
  604. return list_remove_job(fdp1, &fdp1->hw_job_list);
  605. }
  606. /*
  607. * Buffer lists handling
  608. */
  609. static void fdp1_field_complete(struct fdp1_ctx *ctx,
  610. struct fdp1_field_buffer *fbuf)
  611. {
  612. /* job->previous may be on the first field */
  613. if (!fbuf)
  614. return;
  615. if (fbuf->last_field)
  616. v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
  617. }
  618. static void fdp1_queue_field(struct fdp1_ctx *ctx,
  619. struct fdp1_field_buffer *fbuf)
  620. {
  621. unsigned long flags;
  622. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  623. list_add_tail(&fbuf->list, &ctx->fields_queue);
  624. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  625. ctx->buffers_queued++;
  626. }
  627. static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
  628. {
  629. struct fdp1_field_buffer *fbuf;
  630. unsigned long flags;
  631. ctx->buffers_queued--;
  632. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  633. fbuf = list_first_entry_or_null(&ctx->fields_queue,
  634. struct fdp1_field_buffer, list);
  635. if (fbuf)
  636. list_del(&fbuf->list);
  637. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  638. return fbuf;
  639. }
  640. /*
  641. * Return the next field in the queue - or NULL,
  642. * without removing the item from the list
  643. */
  644. static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
  645. {
  646. struct fdp1_field_buffer *fbuf;
  647. unsigned long flags;
  648. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  649. fbuf = list_first_entry_or_null(&ctx->fields_queue,
  650. struct fdp1_field_buffer, list);
  651. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  652. return fbuf;
  653. }
  654. static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
  655. {
  656. u32 value = ioread32(fdp1->regs + reg);
  657. if (debug >= 2)
  658. dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
  659. return value;
  660. }
  661. static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
  662. {
  663. if (debug >= 2)
  664. dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
  665. iowrite32(val, fdp1->regs + reg);
  666. }
  667. /* IPC registers are to be programmed with constant values */
  668. static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
  669. {
  670. struct fdp1_dev *fdp1 = ctx->fdp1;
  671. fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
  672. fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
  673. fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
  674. fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
  675. fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
  676. fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
  677. fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
  678. fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
  679. fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
  680. }
  681. static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
  682. {
  683. struct fdp1_dev *fdp1 = ctx->fdp1;
  684. struct fdp1_q_data *src_q_data = &ctx->out_q;
  685. unsigned int x0, x1;
  686. unsigned int hsize = src_q_data->format.width;
  687. unsigned int vsize = src_q_data->format.height;
  688. x0 = hsize / 3;
  689. x1 = 2 * hsize / 3;
  690. fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
  691. fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
  692. fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
  693. fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
  694. fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
  695. ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
  696. FD1_IPC_SENSOR_CTL2);
  697. fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
  698. (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
  699. FD1_IPC_SENSOR_CTL3);
  700. }
  701. /*
  702. * fdp1_write_lut: Write a padded LUT to the hw
  703. *
  704. * FDP1 uses constant data for de-interlacing processing,
  705. * with large tables. These hardware tables are all 256 bytes
  706. * long, however they often contain repeated data at the end.
  707. *
  708. * The last byte of the table is written to all remaining entries.
  709. */
  710. static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
  711. unsigned int len, unsigned int base)
  712. {
  713. unsigned int i;
  714. u8 pad;
  715. /* Tables larger than the hw are clipped */
  716. len = min(len, 256u);
  717. for (i = 0; i < len; i++)
  718. fdp1_write(fdp1, lut[i], base + (i*4));
  719. /* Tables are padded with the last entry */
  720. pad = lut[i-1];
  721. for (; i < 256; i++)
  722. fdp1_write(fdp1, pad, base + (i*4));
  723. }
  724. static void fdp1_set_lut(struct fdp1_dev *fdp1)
  725. {
  726. fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
  727. FD1_LUT_DIF_ADJ);
  728. fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
  729. FD1_LUT_SAD_ADJ);
  730. fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
  731. FD1_LUT_BLD_GAIN);
  732. fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
  733. FD1_LUT_DIF_GAIN);
  734. fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
  735. FD1_LUT_MDET);
  736. }
  737. static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
  738. struct fdp1_job *job)
  739. {
  740. struct fdp1_dev *fdp1 = ctx->fdp1;
  741. u32 picture_size;
  742. u32 pstride;
  743. u32 format;
  744. u32 smsk_addr;
  745. struct fdp1_q_data *q_data = &ctx->out_q;
  746. /* Picture size is common to Source and Destination frames */
  747. picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
  748. | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
  749. /* Strides */
  750. pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
  751. if (q_data->format.num_planes > 1)
  752. pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
  753. /* Format control */
  754. format = q_data->fmt->fmt;
  755. if (q_data->fmt->swap_yc)
  756. format |= FD1_RPF_FORMAT_RSPYCS;
  757. if (q_data->fmt->swap_uv)
  758. format |= FD1_RPF_FORMAT_RSPUVS;
  759. if (job->active->field == V4L2_FIELD_BOTTOM) {
  760. format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
  761. smsk_addr = ctx->smsk_addr[0];
  762. } else {
  763. smsk_addr = ctx->smsk_addr[1];
  764. }
  765. /* Deint mode is non-zero when deinterlacing */
  766. if (ctx->deint_mode)
  767. format |= FD1_RPF_FORMAT_CIPM;
  768. fdp1_write(fdp1, format, FD1_RPF_FORMAT);
  769. fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
  770. fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
  771. fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
  772. fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
  773. /* Previous Field Channel (CH0) */
  774. if (job->previous)
  775. fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
  776. /* Current Field Channel (CH1) */
  777. fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
  778. fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
  779. fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
  780. /* Next Field Channel (CH2) */
  781. if (job->next)
  782. fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
  783. }
  784. static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
  785. struct fdp1_job *job)
  786. {
  787. struct fdp1_dev *fdp1 = ctx->fdp1;
  788. struct fdp1_q_data *src_q_data = &ctx->out_q;
  789. struct fdp1_q_data *q_data = &ctx->cap_q;
  790. u32 pstride;
  791. u32 format;
  792. u32 swap;
  793. u32 rndctl;
  794. pstride = q_data->format.plane_fmt[0].bytesperline
  795. << FD1_WPF_PSTRIDE_Y_SHIFT;
  796. if (q_data->format.num_planes > 1)
  797. pstride |= q_data->format.plane_fmt[1].bytesperline
  798. << FD1_WPF_PSTRIDE_C_SHIFT;
  799. format = q_data->fmt->fmt; /* Output Format Code */
  800. if (q_data->fmt->swap_yc)
  801. format |= FD1_WPF_FORMAT_WSPYCS;
  802. if (q_data->fmt->swap_uv)
  803. format |= FD1_WPF_FORMAT_WSPUVS;
  804. if (fdp1_fmt_is_rgb(q_data->fmt)) {
  805. /* Enable Colour Space conversion */
  806. format |= FD1_WPF_FORMAT_CSC;
  807. /* Set WRTM */
  808. if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
  809. format |= FD1_WPF_FORMAT_WRTM_709_16;
  810. else if (src_q_data->format.quantization ==
  811. V4L2_QUANTIZATION_FULL_RANGE)
  812. format |= FD1_WPF_FORMAT_WRTM_601_0;
  813. else
  814. format |= FD1_WPF_FORMAT_WRTM_601_16;
  815. }
  816. /* Set an alpha value into the Pad Value */
  817. format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
  818. /* Determine picture rounding and clipping */
  819. rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
  820. rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
  821. /* WPF Swap needs both ISWAP and OSWAP setting */
  822. swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
  823. swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
  824. fdp1_write(fdp1, format, FD1_WPF_FORMAT);
  825. fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
  826. fdp1_write(fdp1, swap, FD1_WPF_SWAP);
  827. fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
  828. fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
  829. fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
  830. fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
  831. }
  832. static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
  833. struct fdp1_job *job)
  834. {
  835. struct fdp1_dev *fdp1 = ctx->fdp1;
  836. u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
  837. u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
  838. u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
  839. /* De-interlacing Mode */
  840. switch (ctx->deint_mode) {
  841. default:
  842. case FDP1_PROGRESSIVE:
  843. dprintk(fdp1, "Progressive Mode\n");
  844. opmode |= FD1_CTL_OPMODE_PRG;
  845. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  846. break;
  847. case FDP1_ADAPT2D3D:
  848. dprintk(fdp1, "Adapt2D3D Mode\n");
  849. if (ctx->sequence == 0 || ctx->aborting)
  850. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  851. else
  852. ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
  853. if (ctx->sequence > 1) {
  854. channels |= FD1_CTL_CHACT_SMW;
  855. channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
  856. }
  857. if (ctx->sequence > 2)
  858. channels |= FD1_CTL_CHACT_SMR;
  859. break;
  860. case FDP1_FIXED3D:
  861. dprintk(fdp1, "Fixed 3D Mode\n");
  862. ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
  863. /* Except for first and last frame, enable all channels */
  864. if (!(ctx->sequence == 0 || ctx->aborting))
  865. channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
  866. break;
  867. case FDP1_FIXED2D:
  868. dprintk(fdp1, "Fixed 2D Mode\n");
  869. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  870. /* No extra channels enabled */
  871. break;
  872. case FDP1_PREVFIELD:
  873. dprintk(fdp1, "Previous Field Mode\n");
  874. ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
  875. channels |= FD1_CTL_CHACT_RD0; /* Previous */
  876. break;
  877. case FDP1_NEXTFIELD:
  878. dprintk(fdp1, "Next Field Mode\n");
  879. ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
  880. channels |= FD1_CTL_CHACT_RD2; /* Next */
  881. break;
  882. }
  883. fdp1_write(fdp1, channels, FD1_CTL_CHACT);
  884. fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
  885. fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
  886. }
  887. /*
  888. * fdp1_device_process() - Run the hardware
  889. *
  890. * Configure and start the hardware to generate a single frame
  891. * of output given our input parameters.
  892. */
  893. static int fdp1_device_process(struct fdp1_ctx *ctx)
  894. {
  895. struct fdp1_dev *fdp1 = ctx->fdp1;
  896. struct fdp1_job *job;
  897. unsigned long flags;
  898. spin_lock_irqsave(&fdp1->device_process_lock, flags);
  899. /* Get a job to process */
  900. job = get_queued_job(fdp1);
  901. if (!job) {
  902. /*
  903. * VINT can call us to see if we can queue another job.
  904. * If we have no work to do, we simply return.
  905. */
  906. spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
  907. return 0;
  908. }
  909. /* First Frame only? ... */
  910. fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
  911. /* Set the mode, and configuration */
  912. fdp1_configure_deint_mode(ctx, job);
  913. /* DLI Static Configuration */
  914. fdp1_set_ipc_dli(ctx);
  915. /* Sensor Configuration */
  916. fdp1_set_ipc_sensor(ctx);
  917. /* Setup the source picture */
  918. fdp1_configure_rpf(ctx, job);
  919. /* Setup the destination picture */
  920. fdp1_configure_wpf(ctx, job);
  921. /* Line Memory Pixel Number Register for linear access */
  922. fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
  923. /* Enable Interrupts */
  924. fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
  925. /* Finally, the Immediate Registers */
  926. /* This job is now in the HW queue */
  927. queue_hw_job(fdp1, job);
  928. /* Start the command */
  929. fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
  930. /* Registers will update to HW at next VINT */
  931. fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
  932. /* Enable VINT Generator */
  933. fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
  934. spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
  935. return 0;
  936. }
  937. /*
  938. * mem2mem callbacks
  939. */
  940. /**
  941. * job_ready() - check whether an instance is ready to be scheduled to run
  942. */
  943. static int fdp1_m2m_job_ready(void *priv)
  944. {
  945. struct fdp1_ctx *ctx = priv;
  946. struct fdp1_q_data *src_q_data = &ctx->out_q;
  947. int srcbufs = 1;
  948. int dstbufs = 1;
  949. dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
  950. v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
  951. v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
  952. /* One output buffer is required for each field */
  953. if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
  954. dstbufs = 2;
  955. if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
  956. || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
  957. dprintk(ctx->fdp1, "Not enough buffers available\n");
  958. return 0;
  959. }
  960. return 1;
  961. }
  962. static void fdp1_m2m_job_abort(void *priv)
  963. {
  964. struct fdp1_ctx *ctx = priv;
  965. dprintk(ctx->fdp1, "+\n");
  966. /* Will cancel the transaction in the next interrupt handler */
  967. ctx->aborting = 1;
  968. /* Immediate abort sequence */
  969. fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
  970. fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
  971. }
  972. /*
  973. * fdp1_prepare_job: Prepare and queue a new job for a single action of work
  974. *
  975. * Prepare the next field, (or frame in progressive) and an output
  976. * buffer for the hardware to perform a single operation.
  977. */
  978. static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
  979. {
  980. struct vb2_v4l2_buffer *vbuf;
  981. struct fdp1_buffer *fbuf;
  982. struct fdp1_dev *fdp1 = ctx->fdp1;
  983. struct fdp1_job *job;
  984. unsigned int buffers_required = 1;
  985. dprintk(fdp1, "+\n");
  986. if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
  987. buffers_required = 2;
  988. if (ctx->buffers_queued < buffers_required)
  989. return NULL;
  990. job = fdp1_job_alloc(fdp1);
  991. if (!job) {
  992. dprintk(fdp1, "No free jobs currently available\n");
  993. return NULL;
  994. }
  995. job->active = fdp1_dequeue_field(ctx);
  996. if (!job->active) {
  997. /* Buffer check should prevent this ever happening */
  998. dprintk(fdp1, "No input buffers currently available\n");
  999. fdp1_job_free(fdp1, job);
  1000. return NULL;
  1001. }
  1002. dprintk(fdp1, "+ Buffer en-route...\n");
  1003. /* Source buffers have been prepared on our buffer_queue
  1004. * Prepare our Output buffer
  1005. */
  1006. vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
  1007. fbuf = to_fdp1_buffer(vbuf);
  1008. job->dst = &fbuf->fields[0];
  1009. job->active->vb->sequence = ctx->sequence;
  1010. job->dst->vb->sequence = ctx->sequence;
  1011. ctx->sequence++;
  1012. if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
  1013. job->previous = ctx->previous;
  1014. /* Active buffer becomes the next job's previous buffer */
  1015. ctx->previous = job->active;
  1016. }
  1017. if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
  1018. /* Must be called after 'active' is dequeued */
  1019. job->next = fdp1_peek_queued_field(ctx);
  1020. }
  1021. /* Transfer timestamps and flags from src->dst */
  1022. job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
  1023. job->dst->vb->flags = job->active->vb->flags &
  1024. V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
  1025. /* Ideally, the frame-end function will just 'check' to see
  1026. * if there are more jobs instead
  1027. */
  1028. ctx->translen++;
  1029. /* Finally, Put this job on the processing queue */
  1030. queue_job(fdp1, job);
  1031. dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
  1032. return job;
  1033. }
  1034. /* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
  1035. *
  1036. * A single input buffer is taken and serialised into our fdp1_buffer
  1037. * queue. The queue is then processed to create as many jobs as possible
  1038. * from our available input.
  1039. */
  1040. static void fdp1_m2m_device_run(void *priv)
  1041. {
  1042. struct fdp1_ctx *ctx = priv;
  1043. struct fdp1_dev *fdp1 = ctx->fdp1;
  1044. struct vb2_v4l2_buffer *src_vb;
  1045. struct fdp1_buffer *buf;
  1046. unsigned int i;
  1047. dprintk(fdp1, "+\n");
  1048. ctx->translen = 0;
  1049. /* Get our incoming buffer of either one or two fields, or one frame */
  1050. src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
  1051. buf = to_fdp1_buffer(src_vb);
  1052. for (i = 0; i < buf->num_fields; i++) {
  1053. struct fdp1_field_buffer *fbuf = &buf->fields[i];
  1054. fdp1_queue_field(ctx, fbuf);
  1055. dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
  1056. i, fbuf->last_field);
  1057. }
  1058. /* Queue as many jobs as our data provides for */
  1059. while (fdp1_prepare_job(ctx))
  1060. ;
  1061. if (ctx->translen == 0) {
  1062. dprintk(fdp1, "No jobs were processed. M2M action complete\n");
  1063. v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
  1064. return;
  1065. }
  1066. /* Kick the job processing action */
  1067. fdp1_device_process(ctx);
  1068. }
  1069. /*
  1070. * device_frame_end:
  1071. *
  1072. * Handles the M2M level after a buffer completion event.
  1073. */
  1074. static void device_frame_end(struct fdp1_dev *fdp1,
  1075. enum vb2_buffer_state state)
  1076. {
  1077. struct fdp1_ctx *ctx;
  1078. unsigned long flags;
  1079. struct fdp1_job *job = get_hw_queued_job(fdp1);
  1080. dprintk(fdp1, "+\n");
  1081. ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
  1082. if (ctx == NULL) {
  1083. v4l2_err(&fdp1->v4l2_dev,
  1084. "Instance released before the end of transaction\n");
  1085. return;
  1086. }
  1087. ctx->num_processed++;
  1088. /*
  1089. * fdp1_field_complete will call buf_done only when the last vb2_buffer
  1090. * reference is complete
  1091. */
  1092. if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
  1093. fdp1_field_complete(ctx, job->previous);
  1094. else
  1095. fdp1_field_complete(ctx, job->active);
  1096. spin_lock_irqsave(&fdp1->irqlock, flags);
  1097. v4l2_m2m_buf_done(job->dst->vb, state);
  1098. job->dst = NULL;
  1099. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  1100. /* Move this job back to the free job list */
  1101. fdp1_job_free(fdp1, job);
  1102. dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
  1103. ctx->num_processed, ctx->translen);
  1104. if (ctx->num_processed == ctx->translen ||
  1105. ctx->aborting) {
  1106. dprintk(ctx->fdp1, "Finishing transaction\n");
  1107. ctx->num_processed = 0;
  1108. v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
  1109. } else {
  1110. /*
  1111. * For pipelined performance support, this would
  1112. * be called from a VINT handler
  1113. */
  1114. fdp1_device_process(ctx);
  1115. }
  1116. }
  1117. /*
  1118. * video ioctls
  1119. */
  1120. static int fdp1_vidioc_querycap(struct file *file, void *priv,
  1121. struct v4l2_capability *cap)
  1122. {
  1123. strlcpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
  1124. strlcpy(cap->card, DRIVER_NAME, sizeof(cap->card));
  1125. snprintf(cap->bus_info, sizeof(cap->bus_info),
  1126. "platform:%s", DRIVER_NAME);
  1127. return 0;
  1128. }
  1129. static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
  1130. {
  1131. unsigned int i, num;
  1132. num = 0;
  1133. for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
  1134. if (fdp1_formats[i].types & type) {
  1135. if (num == f->index)
  1136. break;
  1137. ++num;
  1138. }
  1139. }
  1140. /* Format not found */
  1141. if (i >= ARRAY_SIZE(fdp1_formats))
  1142. return -EINVAL;
  1143. /* Format found */
  1144. f->pixelformat = fdp1_formats[i].fourcc;
  1145. return 0;
  1146. }
  1147. static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
  1148. struct v4l2_fmtdesc *f)
  1149. {
  1150. return fdp1_enum_fmt(f, FDP1_CAPTURE);
  1151. }
  1152. static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
  1153. struct v4l2_fmtdesc *f)
  1154. {
  1155. return fdp1_enum_fmt(f, FDP1_OUTPUT);
  1156. }
  1157. static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
  1158. {
  1159. struct fdp1_q_data *q_data;
  1160. struct fdp1_ctx *ctx = fh_to_ctx(priv);
  1161. if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
  1162. return -EINVAL;
  1163. q_data = get_q_data(ctx, f->type);
  1164. f->fmt.pix_mp = q_data->format;
  1165. return 0;
  1166. }
  1167. static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
  1168. const struct fdp1_fmt *fmt)
  1169. {
  1170. unsigned int i;
  1171. /* Compute and clamp the stride and image size. */
  1172. for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
  1173. unsigned int hsub = i > 0 ? fmt->hsub : 1;
  1174. unsigned int vsub = i > 0 ? fmt->vsub : 1;
  1175. /* From VSP : TODO: Confirm alignment limits for FDP1 */
  1176. unsigned int align = 128;
  1177. unsigned int bpl;
  1178. bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
  1179. pix->width / hsub * fmt->bpp[i] / 8,
  1180. round_down(FDP1_MAX_STRIDE, align));
  1181. pix->plane_fmt[i].bytesperline = round_up(bpl, align);
  1182. pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
  1183. * pix->height / vsub;
  1184. memset(pix->plane_fmt[i].reserved, 0,
  1185. sizeof(pix->plane_fmt[i].reserved));
  1186. }
  1187. if (fmt->num_planes == 3) {
  1188. /* The two chroma planes must have the same stride. */
  1189. pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
  1190. pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
  1191. memset(pix->plane_fmt[2].reserved, 0,
  1192. sizeof(pix->plane_fmt[2].reserved));
  1193. }
  1194. }
static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
                                const struct fdp1_fmt **fmtinfo,
                                struct v4l2_pix_format_mplane *pix)
{
        const struct fdp1_fmt *fmt;
        unsigned int width;
        unsigned int height;

        /* Validate the pixel format to ensure the output queue supports it. */
        fmt = fdp1_find_format(pix->pixelformat);
        if (!fmt || !(fmt->types & FDP1_OUTPUT))
                fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);

        if (fmtinfo)
                *fmtinfo = fmt;

        pix->pixelformat = fmt->fourcc;
        pix->num_planes = fmt->num_planes;

        /*
         * Progressive video and all interlaced field orders are acceptable.
         * Default to V4L2_FIELD_INTERLACED.
         */
        if (pix->field != V4L2_FIELD_NONE &&
            pix->field != V4L2_FIELD_ALTERNATE &&
            !V4L2_FIELD_HAS_BOTH(pix->field))
                pix->field = V4L2_FIELD_INTERLACED;

        /*
         * The deinterlacer doesn't care about the colorspace; accept all
         * values and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB
         * conversion at the output of the deinterlacer supports a subset of
         * encodings and quantization methods and will only be available when
         * the colorspace allows it.
         */
        if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
                pix->colorspace = V4L2_COLORSPACE_SMPTE170M;

        /*
         * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
         * them to the supported frame size range. The height boundaries are
         * related to the full frame; divide them by two when the format passes
         * fields in separate buffers.
         */
        width = round_down(pix->width, fmt->hsub);
        pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);

        height = round_down(pix->height, fmt->vsub);
        if (pix->field == V4L2_FIELD_ALTERNATE)
                pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
        else
                pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);

        fdp1_compute_stride(pix, fmt);
}

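/*
 * Capture (destination) queue format negotiation, summarized: the capture
 * format is always progressive, inherits the colorspace and transfer function
 * from the output queue, and only allows RGB pixel formats when the source
 * encoding/quantization combination is one the hardware converter supports.
 * The width always matches the output queue; the height matches too, except
 * when the output queue passes one field per buffer (V4L2_FIELD_ALTERNATE),
 * in which case it is doubled. For example, a 1920x540 ALTERNATE source
 * produces 1920x1080 progressive capture frames.
 */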
static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
                                 const struct fdp1_fmt **fmtinfo,
                                 struct v4l2_pix_format_mplane *pix)
{
        struct fdp1_q_data *src_data = &ctx->out_q;
        enum v4l2_colorspace colorspace;
        enum v4l2_ycbcr_encoding ycbcr_enc;
        enum v4l2_quantization quantization;
        const struct fdp1_fmt *fmt;
        bool allow_rgb;

        /*
         * Validate the pixel format. We can only accept RGB output formats if
         * the input encoding and quantization are compatible with the format
         * conversions supported by the hardware. The supported combinations
         * are
         *
         * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
         * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
         * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
         */
        colorspace = src_data->format.colorspace;

        ycbcr_enc = src_data->format.ycbcr_enc;
        if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
                ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);

        quantization = src_data->format.quantization;
        if (quantization == V4L2_QUANTIZATION_DEFAULT)
                quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
                                                             ycbcr_enc);

        allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
                    (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
                     quantization == V4L2_QUANTIZATION_LIM_RANGE);

        fmt = fdp1_find_format(pix->pixelformat);
        if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
                fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);

        if (fmtinfo)
                *fmtinfo = fmt;

        pix->pixelformat = fmt->fourcc;
        pix->num_planes = fmt->num_planes;
        pix->field = V4L2_FIELD_NONE;

        /*
         * The colorspace on the capture queue is copied from the output queue
         * as the hardware can't change the colorspace. It can convert YCbCr to
         * RGB though, in which case the encoding and quantization are set to
         * default values as anything else wouldn't make sense.
         */
        pix->colorspace = src_data->format.colorspace;
        pix->xfer_func = src_data->format.xfer_func;

        if (fdp1_fmt_is_rgb(fmt)) {
                pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
                pix->quantization = V4L2_QUANTIZATION_DEFAULT;
        } else {
                pix->ycbcr_enc = src_data->format.ycbcr_enc;
                pix->quantization = src_data->format.quantization;
        }

        /*
         * The frame width is identical to the output queue, and the height is
         * either doubled or identical depending on whether the output queue
         * field order contains one or two fields per frame.
         */
        pix->width = src_data->format.width;
        if (src_data->format.field == V4L2_FIELD_ALTERNATE)
                pix->height = 2 * src_data->format.height;
        else
                pix->height = src_data->format.height;

        fdp1_compute_stride(pix, fmt);
}

static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
        struct fdp1_ctx *ctx = fh_to_ctx(priv);

        if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
        else
                fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);

        dprintk(ctx->fdp1, "Try %s format: %4s (0x%08x) %ux%u field %u\n",
                V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
                (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
                f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);

        return 0;
}

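/*
 * fdp1_set_format() applies a validated format to a queue and caches the
 * values the hardware setup code needs: vsize is the number of lines per
 * field (half the frame height for any interlaced field order), and
 * stride_y/stride_c appear to be used as the per-field line strides, doubled
 * for line-interleaved buffers where consecutive lines of one field are two
 * buffer lines apart. Setting the output format also regenerates a default
 * capture format so the two queues stay consistent.
 *
 * Example, assuming a 1920x1080 V4L2_FIELD_INTERLACED YUYV output format
 * with a 3840-byte line: vsize = 540 and stride_y = 2 * 3840 = 7680.
 */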
static void fdp1_set_format(struct fdp1_ctx *ctx,
                            struct v4l2_pix_format_mplane *pix,
                            enum v4l2_buf_type type)
{
        struct fdp1_q_data *q_data = get_q_data(ctx, type);
        const struct fdp1_fmt *fmtinfo;

        if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                fdp1_try_fmt_output(ctx, &fmtinfo, pix);
        else
                fdp1_try_fmt_capture(ctx, &fmtinfo, pix);

        q_data->fmt = fmtinfo;
        q_data->format = *pix;

        q_data->vsize = pix->height;
        if (pix->field != V4L2_FIELD_NONE)
                q_data->vsize /= 2;

        q_data->stride_y = pix->plane_fmt[0].bytesperline;
        q_data->stride_c = pix->plane_fmt[1].bytesperline;

        /* Adjust strides for interleaved buffers */
        if (pix->field == V4L2_FIELD_INTERLACED ||
            pix->field == V4L2_FIELD_INTERLACED_TB ||
            pix->field == V4L2_FIELD_INTERLACED_BT) {
                q_data->stride_y *= 2;
                q_data->stride_c *= 2;
        }

        /* Propagate the format from the output node to the capture node. */
        if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                struct fdp1_q_data *dst_data = &ctx->cap_q;

                /*
                 * Copy the format, clear the per-plane bytes per line and
                 * image size, override the field and double the height if
                 * needed.
                 */
                dst_data->format = q_data->format;
                memset(dst_data->format.plane_fmt, 0,
                       sizeof(dst_data->format.plane_fmt));

                dst_data->format.field = V4L2_FIELD_NONE;
                if (pix->field == V4L2_FIELD_ALTERNATE)
                        dst_data->format.height *= 2;

                fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);

                dst_data->vsize = dst_data->format.height;
                dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
                dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
        }
}

static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
        struct fdp1_ctx *ctx = fh_to_ctx(priv);
        struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
        struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);

        if (vb2_is_busy(vq)) {
                v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
                return -EBUSY;
        }

        fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);

        dprintk(ctx->fdp1, "Set %s format: %4s (0x%08x) %ux%u field %u\n",
                V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
                (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
                f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);

        return 0;
}

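/*
 * Control handling: V4L2_CID_MIN_BUFFERS_FOR_CAPTURE is registered as a
 * volatile control (see fdp1_open()), so its value is recomputed on every
 * read. It reports 2 when the output queue format carries both fields in a
 * single buffer, since each such buffer yields two deinterlaced capture
 * frames, and 1 otherwise.
 */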
static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
{
        struct fdp1_ctx *ctx =
                container_of(ctrl->handler, struct fdp1_ctx, hdl);
        struct fdp1_q_data *src_q_data = &ctx->out_q;

        switch (ctrl->id) {
        case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
                if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
                        ctrl->val = 2;
                else
                        ctrl->val = 1;
                return 0;
        }

        return 1;
}

static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct fdp1_ctx *ctx =
                container_of(ctrl->handler, struct fdp1_ctx, hdl);

        switch (ctrl->id) {
        case V4L2_CID_ALPHA_COMPONENT:
                ctx->alpha = ctrl->val;
                break;
        case V4L2_CID_DEINTERLACING_MODE:
                ctx->deint_mode = ctrl->val;
                break;
        }

        return 0;
}

static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
        .s_ctrl = fdp1_s_ctrl,
        .g_volatile_ctrl = fdp1_g_ctrl,
};

static const char * const fdp1_ctrl_deint_menu[] = {
        "Progressive",
        "Adaptive 2D/3D",
        "Fixed 2D",
        "Fixed 3D",
        "Previous field",
        "Next field",
        NULL
};

static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
        .vidioc_querycap = fdp1_vidioc_querycap,

        .vidioc_enum_fmt_vid_cap_mplane = fdp1_enum_fmt_vid_cap,
        .vidioc_enum_fmt_vid_out_mplane = fdp1_enum_fmt_vid_out,
        .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
        .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
        .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
        .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
        .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
        .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,

        .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
        .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
        .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
        .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
        .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
        .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
        .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,

        .vidioc_streamon = v4l2_m2m_ioctl_streamon,
        .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

        .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
        .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

/*
 * Queue operations
 */

static int fdp1_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
                            unsigned int sizes[],
                            struct device *alloc_ctxs[])
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
        struct fdp1_q_data *q_data;
        unsigned int i;

        q_data = get_q_data(ctx, vq->type);

        if (*nplanes) {
                if (*nplanes > FDP1_MAX_PLANES)
                        return -EINVAL;

                return 0;
        }

        *nplanes = q_data->format.num_planes;

        for (i = 0; i < *nplanes; i++)
                sizes[i] = q_data->format.plane_fmt[i].sizeimage;

        return 0;
}

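/*
 * Each queued source buffer is split into one or two fdp1_field_buffer
 * entries (two when the buffer carries both fields). fdp1_buf_prepare_field()
 * records the per-plane DMA addresses and the field polarity for one of
 * them, then offsets the second field's addresses within the same buffer:
 * by q_data->stride_y/stride_c for the line-interleaved field orders, or by
 * vsize lines (a whole field) for the sequential SEQ_TB/SEQ_BT layouts.
 */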
static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
                                   struct vb2_v4l2_buffer *vbuf,
                                   unsigned int field_num)
{
        struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
        struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
        unsigned int num_fields;
        unsigned int i;

        num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;

        fbuf->vb = vbuf;
        fbuf->last_field = (field_num + 1) == num_fields;

        for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
                fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);

        switch (vbuf->field) {
        case V4L2_FIELD_INTERLACED:
                /*
                 * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
                 * top-bottom for 50Hz. As TV standards are not applicable to
                 * the mem-to-mem API, use the height as a heuristic.
                 */
                fbuf->field = (q_data->format.height < 576) == field_num
                            ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
                break;
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_SEQ_TB:
                fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
                break;
        case V4L2_FIELD_INTERLACED_BT:
        case V4L2_FIELD_SEQ_BT:
                fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
                break;
        default:
                fbuf->field = vbuf->field;
                break;
        }

        /* The first field needs no address adjustment. */
        if (!field_num)
                return;

        /* Adjust buffer addresses for second field */
        switch (vbuf->field) {
        case V4L2_FIELD_INTERLACED:
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_INTERLACED_BT:
                for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
                        fbuf->addrs[i] +=
                                (i == 0 ? q_data->stride_y : q_data->stride_c);
                break;
        case V4L2_FIELD_SEQ_TB:
        case V4L2_FIELD_SEQ_BT:
                for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
                        fbuf->addrs[i] += q_data->vsize *
                                (i == 0 ? q_data->stride_y : q_data->stride_c);
                break;
        }
}

static int fdp1_buf_prepare(struct vb2_buffer *vb)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
        unsigned int i;

        if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
                bool field_valid = true;

                /* Validate the buffer field. */
                switch (q_data->format.field) {
                case V4L2_FIELD_NONE:
                        if (vbuf->field != V4L2_FIELD_NONE)
                                field_valid = false;
                        break;
                case V4L2_FIELD_ALTERNATE:
                        if (vbuf->field != V4L2_FIELD_TOP &&
                            vbuf->field != V4L2_FIELD_BOTTOM)
                                field_valid = false;
                        break;
                case V4L2_FIELD_INTERLACED:
                case V4L2_FIELD_SEQ_TB:
                case V4L2_FIELD_SEQ_BT:
                case V4L2_FIELD_INTERLACED_TB:
                case V4L2_FIELD_INTERLACED_BT:
                        if (vbuf->field != q_data->format.field)
                                field_valid = false;
                        break;
                }

                if (!field_valid) {
                        dprintk(ctx->fdp1,
                                "buffer field %u invalid for format field %u\n",
                                vbuf->field, q_data->format.field);
                        return -EINVAL;
                }
        } else {
                vbuf->field = V4L2_FIELD_NONE;
        }

        /* Validate the plane sizes. */
        for (i = 0; i < q_data->format.num_planes; i++) {
                unsigned long size = q_data->format.plane_fmt[i].sizeimage;

                if (vb2_plane_size(vb, i) < size) {
                        dprintk(ctx->fdp1,
                                "data will not fit into plane [%u/%u] (%lu < %lu)\n",
                                i, q_data->format.num_planes,
                                vb2_plane_size(vb, i), size);
                        return -EINVAL;
                }

                /* We have known size formats all around */
                vb2_set_plane_payload(vb, i, size);
        }

        buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
        for (i = 0; i < buf->num_fields; ++i)
                fdp1_buf_prepare_field(q_data, vbuf, i);

        return 0;
}

static void fdp1_buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

        v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

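/*
 * When the adaptive 2D/3D deinterlacing mode is selected, start_streaming()
 * allocates a coherent DMA buffer for what appears to be the hardware's
 * still-detection mask (SMSK) working memory, sized at 2 bytes per pixel of
 * one field and split into two halves that are handed to the hardware as a
 * pair of addresses. For example, a 1920x1080 interlaced source (vsize =
 * 540 lines per field) needs 2 * round_up(1920, 8) * 540 = 2073600 bytes.
 */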
static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
        struct fdp1_q_data *q_data = get_q_data(ctx, q->type);

        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /*
                 * Force our deint_mode when the input is progressive,
                 * ignoring any setting from the user; otherwise keep the
                 * requested deinterlacing mode.
                 */
                if (q_data->format.field == V4L2_FIELD_NONE)
                        ctx->deint_mode = FDP1_PROGRESSIVE;

                if (ctx->deint_mode == FDP1_ADAPT2D3D) {
                        u32 stride;
                        dma_addr_t smsk_base;
                        const u32 bpp = 2; /* bytes per pixel */

                        stride = round_up(q_data->format.width, 8);

                        ctx->smsk_size = bpp * stride * q_data->vsize;

                        ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
                                        ctx->smsk_size, &smsk_base, GFP_KERNEL);
                        if (ctx->smsk_cpu == NULL) {
                                dprintk(ctx->fdp1, "Failed to alloc smsk\n");
                                return -ENOMEM;
                        }

                        ctx->smsk_addr[0] = smsk_base;
                        ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size / 2);
                }
        }

        return 0;
}

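/*
 * Stopping a queue returns every buffer still owned by the driver to vb2 in
 * the ERROR state. For the output queue this also drains the internal field
 * queue and frees the SMSK working memory; for the capture queue it completes
 * any jobs still sitting on the device's queued-job list, releasing the field
 * buffers they reference.
 */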
static void fdp1_stop_streaming(struct vb2_queue *q)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
        struct vb2_v4l2_buffer *vbuf;
        unsigned long flags;

        while (1) {
                if (V4L2_TYPE_IS_OUTPUT(q->type))
                        vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                else
                        vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

                if (vbuf == NULL)
                        break;

                spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
                v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
                spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
        }

        /* Empty Output queues */
        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /* Empty our internal queues */
                struct fdp1_field_buffer *fbuf;

                /* Free any queued buffers */
                fbuf = fdp1_dequeue_field(ctx);
                while (fbuf != NULL) {
                        fdp1_field_complete(ctx, fbuf);
                        fbuf = fdp1_dequeue_field(ctx);
                }

                /* Free smsk_data */
                if (ctx->smsk_cpu) {
                        dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
                                          ctx->smsk_cpu, ctx->smsk_addr[0]);
                        ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
                        ctx->smsk_cpu = NULL;
                }

                WARN(!list_empty(&ctx->fields_queue),
                     "Buffer queue not empty");
        } else {
                /* Empty Capture queues (Jobs) */
                struct fdp1_job *job;

                job = get_queued_job(ctx->fdp1);
                while (job) {
                        if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
                                fdp1_field_complete(ctx, job->previous);
                        else
                                fdp1_field_complete(ctx, job->active);

                        v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
                        job->dst = NULL;

                        job = get_queued_job(ctx->fdp1);
                }

                /* Free any held buffer in the ctx */
                fdp1_field_complete(ctx, ctx->previous);

                WARN(!list_empty(&ctx->fdp1->queued_job_list),
                     "Queued Job List not empty");

                WARN(!list_empty(&ctx->fdp1->hw_job_list),
                     "HW Job list not empty");
        }
}

static struct vb2_ops fdp1_qops = {
        .queue_setup = fdp1_queue_setup,
        .buf_prepare = fdp1_buf_prepare,
        .buf_queue = fdp1_buf_queue,
        .start_streaming = fdp1_start_streaming,
        .stop_streaming = fdp1_stop_streaming,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};

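/*
 * Both vb2 queues use the dma-contig allocator and share the device mutex
 * as their lock, so ioctls issued on the source and destination sides of a
 * context serialize against each other and against other contexts on the
 * same device.
 */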
static int queue_init(void *priv, struct vb2_queue *src_vq,
                      struct vb2_queue *dst_vq)
{
        struct fdp1_ctx *ctx = priv;
        int ret;

        src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        src_vq->drv_priv = ctx;
        src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
        src_vq->ops = &fdp1_qops;
        src_vq->mem_ops = &vb2_dma_contig_memops;
        src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        src_vq->lock = &ctx->fdp1->dev_mutex;
        src_vq->dev = ctx->fdp1->dev;

        ret = vb2_queue_init(src_vq);
        if (ret)
                return ret;

        dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        dst_vq->drv_priv = ctx;
        dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
        dst_vq->ops = &fdp1_qops;
        dst_vq->mem_ops = &vb2_dma_contig_memops;
        dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        dst_vq->lock = &ctx->fdp1->dev_mutex;
        dst_vq->dev = ctx->fdp1->dev;

        return vb2_queue_init(dst_vq);
}

/*
 * File operations
 */

static int fdp1_open(struct file *file)
{
        struct fdp1_dev *fdp1 = video_drvdata(file);
        struct v4l2_pix_format_mplane format;
        struct fdp1_ctx *ctx = NULL;
        struct v4l2_ctrl *ctrl;
        int ret = 0;

        if (mutex_lock_interruptible(&fdp1->dev_mutex))
                return -ERESTARTSYS;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                ret = -ENOMEM;
                goto done;
        }

        v4l2_fh_init(&ctx->fh, video_devdata(file));
        file->private_data = &ctx->fh;
        ctx->fdp1 = fdp1;

        /* Initialise Queues */
        INIT_LIST_HEAD(&ctx->fields_queue);

        ctx->translen = 1;
        ctx->sequence = 0;

        /* Initialise controls */
        v4l2_ctrl_handler_init(&ctx->hdl, 3);
        v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
                                     V4L2_CID_DEINTERLACING_MODE,
                                     FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
                                     fdp1_ctrl_deint_menu);

        ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
                                 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
        if (ctrl)
                ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

        v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
                          V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);

        if (ctx->hdl.error) {
                ret = ctx->hdl.error;
                v4l2_ctrl_handler_free(&ctx->hdl);
                goto done;
        }

        ctx->fh.ctrl_handler = &ctx->hdl;
        v4l2_ctrl_handler_setup(&ctx->hdl);

        /* Configure default parameters. */
        memset(&format, 0, sizeof(format));
        fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

        ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
        if (IS_ERR(ctx->fh.m2m_ctx)) {
                ret = PTR_ERR(ctx->fh.m2m_ctx);

                v4l2_ctrl_handler_free(&ctx->hdl);
                kfree(ctx);
                goto done;
        }

        /* Perform any power management required */
        pm_runtime_get_sync(fdp1->dev);

        v4l2_fh_add(&ctx->fh);

        dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
                ctx, ctx->fh.m2m_ctx);

done:
        mutex_unlock(&fdp1->dev_mutex);
        return ret;
}

static int fdp1_release(struct file *file)
{
        struct fdp1_dev *fdp1 = video_drvdata(file);
        struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);

        dprintk(fdp1, "Releasing instance %p\n", ctx);

        v4l2_fh_del(&ctx->fh);
        v4l2_fh_exit(&ctx->fh);
        v4l2_ctrl_handler_free(&ctx->hdl);

        mutex_lock(&fdp1->dev_mutex);
        v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
        mutex_unlock(&fdp1->dev_mutex);

        kfree(ctx);

        pm_runtime_put(fdp1->dev);

        return 0;
}

static const struct v4l2_file_operations fdp1_fops = {
        .owner = THIS_MODULE,
        .open = fdp1_open,
        .release = fdp1_release,
        .poll = v4l2_m2m_fop_poll,
        .unlocked_ioctl = video_ioctl2,
        .mmap = v4l2_m2m_fop_mmap,
};

static const struct video_device fdp1_videodev = {
        .name = DRIVER_NAME,
        .vfl_dir = VFL_DIR_M2M,
        .fops = &fdp1_fops,
        .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
        .ioctl_ops = &fdp1_ioctl_ops,
        .minor = -1,
        .release = video_device_release_empty,
};

static const struct v4l2_m2m_ops m2m_ops = {
        .device_run = fdp1_m2m_device_run,
        .job_ready = fdp1_m2m_job_ready,
        .job_abort = fdp1_m2m_job_abort,
};

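/*
 * Interrupt handling: the handler reads and acknowledges the IRQ status,
 * optionally dumps the cycle and control status registers when verbose
 * debugging is enabled, and then completes the current frame. A vertical
 * sync error (VERE) completes the frame in the ERROR state, a frame end
 * (FREE) completes it in the DONE state, and anything outside the interrupt
 * mask is reported as a spurious interrupt with IRQ_NONE.
 */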
static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
{
        struct fdp1_dev *fdp1 = dev_id;
        u32 int_status;
        u32 ctl_status;
        u32 vint_cnt;
        u32 cycles;

        int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
        cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
        ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
        vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
                        FD1_CTL_STATUS_VINT_CNT_SHIFT;

        /* Clear interrupts */
        fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);

        if (debug >= 2) {
                dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
                        int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
                        int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
                        int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");

                dprintk(fdp1, "CycleStatus = %d (%dms)\n",
                        cycles, cycles / (fdp1->clk_rate / 1000));

                dprintk(fdp1,
                        "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
                        ctl_status, vint_cnt,
                        ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
                        ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
                        ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
                        ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
                dprintk(fdp1, "***********************************\n");
        }

        /* Spurious interrupt */
        if (!(FD1_CTL_IRQ_MASK & int_status))
                return IRQ_NONE;

        /* Work completed, release the frame */
        if (FD1_CTL_IRQ_VERE & int_status)
                device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
        else if (FD1_CTL_IRQ_FREE & int_status)
                device_frame_end(fdp1, VB2_BUF_STATE_DONE);

        return IRQ_HANDLED;
}

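/*
 * Probe sequence, in order: allocate and initialise the per-device state and
 * job lists, map the registers, request the interrupt, look up the optional
 * FCP via the "renesas,fcp" phandle, cache the functional clock rate, then
 * register the V4L2 device, the mem2mem device and the video node. Finally
 * the IP version register is read under a temporary runtime PM reference to
 * identify the FDP1 instance before letting the hardware sleep again.
 */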
static int fdp1_probe(struct platform_device *pdev)
{
        struct fdp1_dev *fdp1;
        struct video_device *vfd;
        struct device_node *fcp_node;
        struct resource *res;
        struct clk *clk;
        unsigned int i;
        int ret;
        int hw_version;

        fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
        if (!fdp1)
                return -ENOMEM;

        INIT_LIST_HEAD(&fdp1->free_job_list);
        INIT_LIST_HEAD(&fdp1->queued_job_list);
        INIT_LIST_HEAD(&fdp1->hw_job_list);

        /* Initialise the jobs on the free list */
        for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
                list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);

        mutex_init(&fdp1->dev_mutex);

        spin_lock_init(&fdp1->irqlock);
        spin_lock_init(&fdp1->device_process_lock);
        fdp1->dev = &pdev->dev;
        platform_set_drvdata(pdev, fdp1);

        /* Memory-mapped registers */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fdp1->regs))
                return PTR_ERR(fdp1->regs);

        /* Interrupt service routine registration */
        fdp1->irq = ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot find IRQ\n");
                return ret;
        }

        ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
                               dev_name(&pdev->dev), fdp1);
        if (ret) {
                dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
                return ret;
        }

        /* FCP */
        fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
        if (fcp_node) {
                fdp1->fcp = rcar_fcp_get(fcp_node);
                of_node_put(fcp_node);
                if (IS_ERR(fdp1->fcp)) {
                        dev_err(&pdev->dev, "FCP not found (%ld)\n",
                                PTR_ERR(fdp1->fcp));
                        return PTR_ERR(fdp1->fcp);
                }
        }

        /* Determine our clock rate */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        fdp1->clk_rate = clk_get_rate(clk);
        clk_put(clk);

        /* V4L2 device registration */
        ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
        if (ret) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to register V4L2 device\n");
                return ret;
        }

        /* M2M registration */
        fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
        if (IS_ERR(fdp1->m2m_dev)) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
                ret = PTR_ERR(fdp1->m2m_dev);
                goto unreg_dev;
        }

        /* Video registration */
        fdp1->vfd = fdp1_videodev;
        vfd = &fdp1->vfd;
        vfd->lock = &fdp1->dev_mutex;
        vfd->v4l2_dev = &fdp1->v4l2_dev;
        video_set_drvdata(vfd, fdp1);
        strlcpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));

        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
        if (ret) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
                goto release_m2m;
        }

        v4l2_info(&fdp1->v4l2_dev,
                  "Device registered as /dev/video%d\n", vfd->num);

        /* Power up the cells to read HW */
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(fdp1->dev);
        hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);

        switch (hw_version) {
        case FD1_IP_H3:
                dprintk(fdp1, "FDP1 Version R-Car H3\n");
                break;
        case FD1_IP_M3W:
                dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
                break;
        default:
                dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
                        hw_version);
        }

        /* Allow the hw to sleep until an open call puts it to use */
        pm_runtime_put(fdp1->dev);

        return 0;

release_m2m:
        v4l2_m2m_release(fdp1->m2m_dev);

unreg_dev:
        v4l2_device_unregister(&fdp1->v4l2_dev);

        return ret;
}

static int fdp1_remove(struct platform_device *pdev)
{
        struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);

        v4l2_m2m_release(fdp1->m2m_dev);
        video_unregister_device(&fdp1->vfd);
        v4l2_device_unregister(&fdp1->v4l2_dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

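/*
 * Runtime PM: suspending the device disables the FCP that feeds it, and
 * resuming re-enables the FCP and reprograms the static de-interlacing
 * look-up tables, presumably because their contents are lost while the
 * hardware is powered down.
 */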
static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
{
        struct fdp1_dev *fdp1 = dev_get_drvdata(dev);

        rcar_fcp_disable(fdp1->fcp);

        return 0;
}

static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
{
        struct fdp1_dev *fdp1 = dev_get_drvdata(dev);

        /* Program in the static LUTs */
        fdp1_set_lut(fdp1);

        return rcar_fcp_enable(fdp1->fcp);
}

static const struct dev_pm_ops fdp1_pm_ops = {
        SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
                           fdp1_pm_runtime_resume,
                           NULL)
};

static const struct of_device_id fdp1_dt_ids[] = {
        { .compatible = "renesas,fdp1" },
        { },
};
MODULE_DEVICE_TABLE(of, fdp1_dt_ids);

static struct platform_driver fdp1_pdrv = {
        .probe = fdp1_probe,
        .remove = fdp1_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = fdp1_dt_ids,
                .pm = &fdp1_pm_ops,
        },
};

module_platform_driver(fdp1_pdrv);

MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);