am437x-vpfe.c
/*
 * TI VPFE capture Driver
 *
 * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
 *
 * Benoit Parrot <bparrot@ti.com>
 * Lad, Prabhakar <prabhakar.csengg@gmail.com>
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/videodev2.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>

#include "am437x-vpfe.h"

#define VPFE_MODULE_NAME "vpfe"
#define VPFE_VERSION "0.1.0"

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level 0-8");

#define vpfe_dbg(level, dev, fmt, arg...)	\
		v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
#define vpfe_info(dev, fmt, arg...)	\
		v4l2_info(&dev->v4l2_dev, fmt, ##arg)
#define vpfe_err(dev, fmt, arg...)	\
		v4l2_err(&dev->v4l2_dev, fmt, ##arg)
/* standard information */
struct vpfe_standard {
	v4l2_std_id std_id;
	unsigned int width;
	unsigned int height;
	struct v4l2_fract pixelaspect;
	int frame_format;
};

static const struct vpfe_standard vpfe_standards[] = {
	{V4L2_STD_525_60, 720, 480, {11, 10}, 1},
	{V4L2_STD_625_50, 720, 576, {54, 59}, 1},
};

struct bus_format {
	unsigned int width;
	unsigned int bpp;
};

/*
 * struct vpfe_fmt - VPFE media bus format information
 * @name: V4L2 format description
 * @fourcc: V4L2 pixel format FCC identifier
 * @code: V4L2 media bus format code
 * @l: bus width in bits and bytes per pixel in memory when the
 *	subdev bus carries 10 bits per sample
 * @s: bus width in bits and bytes per pixel in memory when the
 *	subdev bus carries 8 bits per sample
 * @supported: indicates whether the format is supported by the subdev
 * @index: format enumeration index
 */
struct vpfe_fmt {
	const char *name;
	u32 fourcc;
	u32 code;
	struct bus_format l;
	struct bus_format s;
	bool supported;
	u32 index;
};
static struct vpfe_fmt formats[] = {
	{
		.name = "YUV 4:2:2 packed, YCbYCr",
		.fourcc = V4L2_PIX_FMT_YUYV,
		.code = MEDIA_BUS_FMT_YUYV8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, CbYCrY",
		.fourcc = V4L2_PIX_FMT_UYVY,
		.code = MEDIA_BUS_FMT_UYVY8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, YCrYCb",
		.fourcc = V4L2_PIX_FMT_YVYU,
		.code = MEDIA_BUS_FMT_YVYU8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "YUV 4:2:2 packed, CrYCbY",
		.fourcc = V4L2_PIX_FMT_VYUY,
		.code = MEDIA_BUS_FMT_VYUY8_2X8,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "RAW8 BGGR",
		.fourcc = V4L2_PIX_FMT_SBGGR8,
		.code = MEDIA_BUS_FMT_SBGGR8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 GBRG",
		.fourcc = V4L2_PIX_FMT_SGBRG8,
		.code = MEDIA_BUS_FMT_SGBRG8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 GRBG",
		.fourcc = V4L2_PIX_FMT_SGRBG8,
		.code = MEDIA_BUS_FMT_SGRBG8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RAW8 RGGB",
		.fourcc = V4L2_PIX_FMT_SRGGB8,
		.code = MEDIA_BUS_FMT_SRGGB8_1X8,
		.l.width = 10,
		.l.bpp = 2,
		.s.width = 8,
		.s.bpp = 1,
		.supported = false,
	}, {
		.name = "RGB565 (LE)",
		.fourcc = V4L2_PIX_FMT_RGB565,
		.code = MEDIA_BUS_FMT_RGB565_2X8_LE,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	}, {
		.name = "RGB565 (BE)",
		.fourcc = V4L2_PIX_FMT_RGB565X,
		.code = MEDIA_BUS_FMT_RGB565_2X8_BE,
		.l.width = 10,
		.l.bpp = 4,
		.s.width = 8,
		.s.bpp = 2,
		.supported = false,
	},
};
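
/*
 * Illustrative sketch (not part of the driver): how the table above is
 * consumed.  The .l entry applies when the subdev bus carries 10 bits per
 * sample and the .s entry when it carries 8, mirroring the selection done
 * in mbus_to_pix() below.  "bus_width" is a hypothetical local here.
 *
 *	struct vpfe_fmt *fmt = find_format_by_code(MEDIA_BUS_FMT_YUYV8_2X8);
 *	unsigned int bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
 *	// YUYV: 4 bytes/pixel in memory on a 10-bit bus, 2 on an 8-bit bus
 */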
static int
__vpfe_get_format(struct vpfe_device *vpfe,
		  struct v4l2_format *format, unsigned int *bpp);

static struct vpfe_fmt *find_format_by_code(unsigned int code)
{
	struct vpfe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(formats); k++) {
		fmt = &formats[k];
		if (fmt->code == code)
			return fmt;
	}

	return NULL;
}

static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
{
	struct vpfe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(formats); k++) {
		fmt = &formats[k];
		if (fmt->fourcc == pixelformat)
			return fmt;
	}

	return NULL;
}

static void
mbus_to_pix(struct vpfe_device *vpfe,
	    const struct v4l2_mbus_framefmt *mbus,
	    struct v4l2_pix_format *pix, unsigned int *bpp)
{
	struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
	unsigned int bus_width = sdinfo->vpfe_param.bus_width;
	struct vpfe_fmt *fmt;

	fmt = find_format_by_code(mbus->code);
	if (WARN_ON(fmt == NULL)) {
		pr_err("Invalid mbus code set\n");
		*bpp = 1;
		return;
	}

	memset(pix, 0, sizeof(*pix));
	v4l2_fill_pix_format(pix, mbus);
	pix->pixelformat = fmt->fourcc;
	*bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;

	/* pitch should be 32-byte aligned */
	pix->bytesperline = ALIGN(pix->width * *bpp, 32);
	pix->sizeimage = pix->bytesperline * pix->height;
}
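
/*
 * Worked example (illustrative only): for a 720x480 UYVY frame on an
 * 8-bit bus, *bpp = 2, so bytesperline = ALIGN(720 * 2, 32) = 1440 and
 * sizeimage = 1440 * 480 = 691200 bytes.  720 * 2 is already a multiple
 * of 32, so no padding is added in this case.
 */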
static void pix_to_mbus(struct vpfe_device *vpfe,
			struct v4l2_pix_format *pix_fmt,
			struct v4l2_mbus_framefmt *mbus_fmt)
{
	struct vpfe_fmt *fmt;

	fmt = find_format_by_pix(pix_fmt->pixelformat);
	if (!fmt) {
		/* default to first entry */
		vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
			 pix_fmt->pixelformat);
		fmt = &formats[0];
	}

	memset(mbus_fmt, 0, sizeof(*mbus_fmt));
	v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
}

/* Print Four-character-code (FOURCC) */
static char *print_fourcc(u32 fmt)
{
	static char code[5];

	code[0] = (unsigned char)(fmt & 0xff);
	code[1] = (unsigned char)((fmt >> 8) & 0xff);
	code[2] = (unsigned char)((fmt >> 16) & 0xff);
	code[3] = (unsigned char)((fmt >> 24) & 0xff);
	code[4] = '\0';

	return code;
}
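
/*
 * Worked example (illustrative only): V4L2_PIX_FMT_YUYV is the FOURCC
 * 'Y''U''Y''V' packed little-endian, i.e. 0x56595559.  Extracting the
 * bytes from LSB to MSB gives 0x59 'Y', 0x55 'U', 0x59 'Y', 0x56 'V',
 * so print_fourcc() returns "YUYV".  Note the returned pointer refers to
 * a static buffer, so the result is only valid until the next call.
 */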
static int
cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
{
	return lhs->type == rhs->type &&
		lhs->fmt.pix.width == rhs->fmt.pix.width &&
		lhs->fmt.pix.height == rhs->fmt.pix.height &&
		lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
		lhs->fmt.pix.field == rhs->fmt.pix.field &&
		lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
		lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
		lhs->fmt.pix.quantization == rhs->fmt.pix.quantization &&
		lhs->fmt.pix.xfer_func == rhs->fmt.pix.xfer_func;
}

static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
{
	return ioread32(ccdc->ccdc_cfg.base_addr + offset);
}

static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
{
	iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
}

static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
{
	return container_of(ccdc, struct vpfe_device, ccdc);
}

static inline
struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb)
{
	return container_of(vb, struct vpfe_cap_buffer, vb);
}

static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
{
	vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
}

static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
{
	unsigned int cfg;

	if (!flag) {
		cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
		cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
	} else {
		cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
	}

	vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
}
static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
			     struct v4l2_rect *image_win,
			     enum ccdc_frmfmt frm_fmt,
			     int bpp)
{
	int horz_start, horz_nr_pixels;
	int vert_start, vert_nr_lines;
	int val, mid_img;

	/*
	 * ppc - per pixel count.  Indicates how many pixels per cell are
	 * output to SDRAM.  For example, for YCbCr it is one Y and one C,
	 * so 2; for raw capture it is 1.
	 */
	horz_start = image_win->left * bpp;
	horz_nr_pixels = (image_win->width * bpp) - 1;
	vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
		       horz_nr_pixels, VPFE_HORZ_INFO);

	vert_start = image_win->top;

	if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
		vert_nr_lines = (image_win->height >> 1) - 1;
		vert_start >>= 1;
		/* Since first line doesn't have any data */
		vert_start += 1;
		/* configure VDINT0 */
		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
	} else {
		/* Since first line doesn't have any data */
		vert_start += 1;
		vert_nr_lines = image_win->height - 1;
		/*
		 * configure VDINT0 and VDINT1. VDINT1 will be at half
		 * of image height
		 */
		mid_img = vert_start + (image_win->height / 2);
		val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
			(mid_img & VPFE_VDINT_VDINT1_MASK);
	}

	vpfe_reg_write(ccdc, val, VPFE_VDINT);

	vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
		       vert_start, VPFE_VERT_START);
	vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
}
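
/*
 * Worked example (illustrative only): a progressive 720x480 YUYV window
 * at left = 0, top = 0 with bpp = 2 programs HORZ_INFO with
 * horz_start = 0 and horz_nr_pixels = 720 * 2 - 1 = 1439, and VERT_LINES
 * with 480 - 1 = 479.  vert_start becomes 1 because the first line
 * carries no data.
 */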
static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = to_vpfe(ccdc);

	vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
	vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
	vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
	vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
	vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
	vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
	vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_SYNMODE));
	vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
	vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
	vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_VERT_START));
	vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
		 vpfe_reg_read(ccdc, VPFE_VERT_LINES));
}
static int
vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
			 struct vpfe_ccdc_config_params_raw *ccdcparam)
{
	struct vpfe_device *vpfe = to_vpfe(ccdc);
	u8 max_gamma, max_data;

	if (!ccdcparam->alaw.enable)
		return 0;

	max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
	max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);

	if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
	    ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
	    max_gamma > max_data) {
		vpfe_dbg(1, vpfe, "Invalid data line select\n");
		return -EINVAL;
	}

	return 0;
}
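
/*
 * Illustrative note (assumption about the helper macros and enum ordering
 * defined in am437x-vpfe.h): the A-law gamma window must lie between
 * VPFE_CCDC_GAMMA_BITS_15_6 and VPFE_CCDC_GAMMA_BITS_09_0, and the most
 * significant bit selected for compression must not exceed the captured
 * data width.  For example, requesting a 15:6 gamma window while data_sz
 * is 8 bits would make max_gamma > max_data and be rejected with -EINVAL.
 */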
static void
vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
			    struct vpfe_ccdc_config_params_raw *raw_params)
{
	struct vpfe_ccdc_config_params_raw *config_params =
				&ccdc->ccdc_cfg.bayer.config_params;

	*config_params = *raw_params;
}

/*
 * vpfe_ccdc_restore_defaults()
 * This function will write defaults to all CCDC registers
 */
static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
{
	int i;

	/* Disable CCDC */
	vpfe_pcr_enable(ccdc, 0);

	/* set all registers to default value */
	for (i = 4; i <= 0x94; i += 4)
		vpfe_reg_write(ccdc, 0, i);

	vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
	vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
}

static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
{
	int dma_cntl, i, pcr;

	/* If the CCDC module is still busy wait for it to be done */
	for (i = 0; i < 10; i++) {
		usleep_range(5000, 6000);
		pcr = vpfe_reg_read(ccdc, VPFE_PCR);
		if (!pcr)
			break;

		/* make sure it is disabled */
		vpfe_pcr_enable(ccdc, 0);
	}

	/* Disable CCDC by resetting all registers to default POR values */
	vpfe_ccdc_restore_defaults(ccdc);

	/*
	 * If the DMA_CNTL overflow bit is set, clear it.  It appears to
	 * take a while (~20 ms) for this to become quiescent.
	 */
	for (i = 0; i < 10; i++) {
		dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
		if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
			break;

		/* Clear the overflow bit */
		vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
		usleep_range(5000, 6000);
	}

	/* Disable the module at the CONFIG level */
	vpfe_config_enable(ccdc, 0);

	pm_runtime_put_sync(dev);

	return 0;
}
static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct vpfe_ccdc_config_params_raw raw_params;
	int x;

	if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
		return -EINVAL;

	x = copy_from_user(&raw_params, params, sizeof(raw_params));
	if (x) {
		vpfe_dbg(1, vpfe,
			 "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
			 x);
		return -EFAULT;
	}

	if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
		vpfe_ccdc_update_raw_params(ccdc, &raw_params);
		return 0;
	}

	return -EINVAL;
}
/*
 * vpfe_ccdc_config_ycbcr()
 * This function will configure CCDC for YCbCr video capture
 */
static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
	u32 syn_mode;

	vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");

	/*
	 * First restore the CCDC registers to default values.
	 * This is important since we assume default values to be set in
	 * a lot of registers that we didn't touch.
	 */
	vpfe_ccdc_restore_defaults(ccdc);

	/*
	 * Configure the pixel format, frame format and video frame format,
	 * enable output to SDRAM, enable the internal timing generator
	 * and 8-bit pack mode.
	 */
	syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
		    VPFE_SYN_MODE_INPMOD_SHIFT) |
		    ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
		    VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
		    VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);

	/* setup BT.656 sync mode */
	if (params->bt656_enable) {
		vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);

		/*
		 * Configure the FID, VD and HD pin polarity:
		 * FLD and HD polarity positive, VD negative, 8-bit data.
		 */
		syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
		if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
			syn_mode |= VPFE_SYN_MODE_10BITS;
		else
			syn_mode |= VPFE_SYN_MODE_8BITS;
	} else {
		/* y/c external sync mode */
		syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
			     VPFE_FID_POL_SHIFT) |
			     ((params->hd_pol & VPFE_HD_POL_MASK) <<
			     VPFE_HD_POL_SHIFT) |
			     ((params->vd_pol & VPFE_VD_POL_MASK) <<
			     VPFE_VD_POL_SHIFT));
	}

	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);

	/* configure video window */
	vpfe_ccdc_setwin(ccdc, &params->win,
			 params->frm_fmt, params->bytesperpixel);

	/*
	 * Configure the order of Y/Cb/Cr in SDRAM and disable latching of
	 * internal registers on VSYNC.
	 */
	if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
		vpfe_reg_write(ccdc,
			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
			       VPFE_LATCH_ON_VSYNC_DISABLE |
			       VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
	else
		vpfe_reg_write(ccdc,
			       (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
			       VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);

	/*
	 * Configure the horizontal line offset.  This should be on a
	 * 32-byte boundary, so the 5 LSBs are cleared.
	 */
	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);

	/* configure the memory line offset */
	if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
		/* two fields are interleaved in memory */
		vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
			       VPFE_SDOFST);
}
static void
vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
			     struct vpfe_ccdc_black_clamp *bclamp)
{
	u32 val;

	if (!bclamp->enable) {
		/* configure DCSub */
		val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
		vpfe_reg_write(ccdc, val, VPFE_DCSUB);
		vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
		return;
	}

	/*
	 * Configure the gain, the start pixel, the number of lines and the
	 * number of pixels per line to average, and enable black clamping.
	 */
	val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
	       ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
		VPFE_BLK_ST_PXL_SHIFT) |
	       ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
		VPFE_BLK_SAMPLE_LINE_SHIFT) |
	       ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
		VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
	vpfe_reg_write(ccdc, val, VPFE_CLAMP);
	/* If black clamping is enabled, set DCSUB to 0 */
	vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
}

static void
vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
				struct vpfe_ccdc_black_compensation *bcomp)
{
	u32 val;

	val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
	      ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_GB_COMP_SHIFT) |
	      ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_GR_COMP_SHIFT) |
	      ((bcomp->r & VPFE_BLK_COMP_MASK) <<
	       VPFE_BLK_COMP_R_COMP_SHIFT));
	vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
}
/*
 * vpfe_ccdc_config_raw()
 * This function will configure CCDC for Raw capture mode
 */
static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
	struct vpfe_ccdc_config_params_raw *config_params =
				&ccdc->ccdc_cfg.bayer.config_params;
	struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
	unsigned int syn_mode;
	unsigned int val;

	vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");

	/* Reset CCDC */
	vpfe_ccdc_restore_defaults(ccdc);

	/* Disable latching function registers on VSYNC */
	vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);

	/*
	 * Configure the vertical sync polarity (SYN_MODE.VDPOL), the
	 * horizontal sync polarity (SYN_MODE.HDPOL), the field id polarity
	 * (SYN_MODE.FLDPOL), the frame format (progressive or interlaced),
	 * the data size (SYN_MODE.DATSIZ) and the pixel format (input mode);
	 * enable output to SDRAM and the internal timing generator.
	 */
	syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
		   ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
		   ((params->fid_pol & VPFE_FID_POL_MASK) <<
		   VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
		   VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
		   ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
		   VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
		   VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
		   VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);

	/* Enable and configure the aLaw register if needed */
	if (config_params->alaw.enable) {
		val = ((config_params->alaw.gamma_wd &
		      VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
		vpfe_reg_write(ccdc, val, VPFE_ALAW);
		vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
	}

	/* Configure video window */
	vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
			 params->bytesperpixel);

	/* Configure Black Clamp */
	vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);

	/* Configure Black level compensation */
	vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);

	/* If the data size is 8 bit then pack the data */
	if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
	    config_params->alaw.enable)
		syn_mode |= VPFE_DATA_PACK_ENABLE;

	/*
	 * Configure the horizontal offset register.  If pack 8 is enabled,
	 * one pixel will take one byte.
	 */
	vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);

	vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
		 params->bytesperline, params->bytesperline);

	/* Set value for SDOFST */
	if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
		if (params->image_invert_enable) {
			/* For interlace inverse mode */
			vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
				       VPFE_SDOFST);
		} else {
			/* For interlace non inverse mode */
			vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
				       VPFE_SDOFST);
		}
	} else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
		vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
			       VPFE_SDOFST);
	}

	vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);

	vpfe_reg_dump(ccdc);
}
static inline int
vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
		      enum ccdc_buftype buf_type)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc->ccdc_cfg.bayer.buf_type = buf_type;
	else
		ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;

	return 0;
}

static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.buf_type;

	return ccdc->ccdc_cfg.ycbcr.buf_type;
}

static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);

	vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
		 ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));

	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
		/*
		 * Need to clear it in case it was left on
		 * after the last capture.
		 */
		ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;

		switch (pixfmt) {
		case V4L2_PIX_FMT_SBGGR8:
			ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
			break;

		case V4L2_PIX_FMT_YUYV:
		case V4L2_PIX_FMT_UYVY:
		case V4L2_PIX_FMT_YUV420:
		case V4L2_PIX_FMT_NV12:
		case V4L2_PIX_FMT_RGB565X:
			break;

		case V4L2_PIX_FMT_SBGGR16:
		default:
			return -EINVAL;
		}
	} else {
		switch (pixfmt) {
		case V4L2_PIX_FMT_YUYV:
			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
			break;

		case V4L2_PIX_FMT_UYVY:
			ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}

static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
{
	u32 pixfmt;

	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		pixfmt = V4L2_PIX_FMT_YUYV;
	} else {
		if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
			pixfmt = V4L2_PIX_FMT_YUYV;
		else
			pixfmt = V4L2_PIX_FMT_UYVY;
	}

	return pixfmt;
}

static int
vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
			   struct v4l2_rect *win, unsigned int bpp)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
		ccdc->ccdc_cfg.bayer.win = *win;
		ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
		ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
	} else {
		ccdc->ccdc_cfg.ycbcr.win = *win;
		ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
		ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
	}

	return 0;
}

static inline void
vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
			   struct v4l2_rect *win)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		*win = ccdc->ccdc_cfg.bayer.win;
	else
		*win = ccdc->ccdc_cfg.ycbcr.win;
}

static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.bytesperline;

	return ccdc->ccdc_cfg.ycbcr.bytesperline;
}

static inline int
vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
			   enum ccdc_frmfmt frm_fmt)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
	else
		ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;

	return 0;
}

static inline enum ccdc_frmfmt
vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
{
	if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
		return ccdc->ccdc_cfg.bayer.frm_fmt;

	return ccdc->ccdc_cfg.ycbcr.frm_fmt;
}

static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
{
	return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
}

static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
{
	vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
}
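
/*
 * Illustrative note: SDR_ADDR only holds 32-byte aligned addresses, so
 * the low five bits are masked off above.  For example, a DMA address of
 * 0x8000001f would be written as 0x80000000.  Buffers allocated through
 * vb2_dma_contig are page aligned, so in practice the mask leaves the
 * base address unchanged.
 */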
static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
				      struct vpfe_hw_if_param *params)
{
	struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);

	ccdc->ccdc_cfg.if_type = params->if_type;

	switch (params->if_type) {
	case VPFE_BT656:
	case VPFE_YCBCR_SYNC_16:
	case VPFE_YCBCR_SYNC_8:
	case VPFE_BT656_10BIT:
		ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
		ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
		break;

	case VPFE_RAW_BAYER:
		ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
		ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;

		if (params->bus_width == 10)
			ccdc->ccdc_cfg.bayer.config_params.data_sz =
				VPFE_CCDC_DATA_10BITS;
		else
			ccdc->ccdc_cfg.bayer.config_params.data_sz =
				VPFE_CCDC_DATA_8BITS;

		vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
			 params->bus_width);
		vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
			 ccdc->ccdc_cfg.bayer.config_params.data_sz);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
{
	unsigned int vpfe_int_status;

	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);

	switch (vdint) {
	/* VD0 interrupt */
	case VPFE_VDINT0:
		vpfe_int_status &= ~VPFE_VDINT0;
		vpfe_int_status |= VPFE_VDINT0;
		break;

	/* VD1 interrupt */
	case VPFE_VDINT1:
		vpfe_int_status &= ~VPFE_VDINT1;
		vpfe_int_status |= VPFE_VDINT1;
		break;

	/* VD2 interrupt */
	case VPFE_VDINT2:
		vpfe_int_status &= ~VPFE_VDINT2;
		vpfe_int_status |= VPFE_VDINT2;
		break;

	/* Clear all interrupts */
	default:
		vpfe_int_status &= ~(VPFE_VDINT0 |
				     VPFE_VDINT1 |
				     VPFE_VDINT2);
		vpfe_int_status |= (VPFE_VDINT0 |
				    VPFE_VDINT1 |
				    VPFE_VDINT2);
		break;
	}
	/* Clear specific VDINT from the status register */
	vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);

	vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);

	/* Acknowledge that we are done with all interrupts */
	vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
}
static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
{
	ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;

	ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
	ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
	ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
	ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;

	ccdc->ccdc_cfg.ycbcr.win.left = 0;
	ccdc->ccdc_cfg.ycbcr.win.top = 0;
	ccdc->ccdc_cfg.ycbcr.win.width = 720;
	ccdc->ccdc_cfg.ycbcr.win.height = 576;
	ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;

	ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
	ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
	ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
	ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;

	ccdc->ccdc_cfg.bayer.win.left = 0;
	ccdc->ccdc_cfg.bayer.win.top = 0;
	ccdc->ccdc_cfg.bayer.win.width = 800;
	ccdc->ccdc_cfg.bayer.win.height = 600;
	ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
	ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
				VPFE_CCDC_GAMMA_BITS_09_0;
}
/*
 * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
 */
static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
				      struct v4l2_format *f)
{
	struct v4l2_rect image_win;
	enum ccdc_buftype buf_type;
	enum ccdc_frmfmt frm_fmt;

	memset(f, 0, sizeof(*f));
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
	f->fmt.pix.width = image_win.width;
	f->fmt.pix.height = image_win.height;
	f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
				f->fmt.pix.height;
	buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
	f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
	frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);

	if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
		f->fmt.pix.field = V4L2_FIELD_NONE;
	} else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
		if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
			f->fmt.pix.field = V4L2_FIELD_INTERLACED;
		} else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
			f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
		} else {
			vpfe_err(vpfe, "Invalid buf_type\n");
			return -EINVAL;
		}
	} else {
		vpfe_err(vpfe, "Invalid frm_fmt\n");
		return -EINVAL;
	}

	return 0;
}
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
	enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
	int ret = 0;

	vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");

	vpfe_dbg(1, vpfe, "pixelformat: %s\n",
		 print_fourcc(vpfe->fmt.fmt.pix.pixelformat));

	if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
				       vpfe->fmt.fmt.pix.pixelformat) < 0) {
		vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
		return -EINVAL;
	}

	/* configure the image window */
	vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);

	switch (vpfe->fmt.fmt.pix.field) {
	case V4L2_FIELD_INTERLACED:
		/* do nothing, since it is default */
		ret = vpfe_ccdc_set_buftype(
				&vpfe->ccdc,
				CCDC_BUFTYPE_FLD_INTERLEAVED);
		break;

	case V4L2_FIELD_NONE:
		frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
		/* buffer type only applicable for interlaced scan */
		break;

	case V4L2_FIELD_SEQ_TB:
		ret = vpfe_ccdc_set_buftype(
				&vpfe->ccdc,
				CCDC_BUFTYPE_FLD_SEPARATED);
		break;

	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
}
/*
 * vpfe_config_image_format()
 * For a given standard, this function sets up the default
 * pix format and crop values in the vpfe device and ccdc.  It starts
 * with default values taken from the standard table, then, if the sub
 * device supports get_fmt, overrides them with the subdev values.  The
 * crop rectangle is set to match the scan resolution starting at (0,0).
 * Finally it calls vpfe_config_ccdc_image_format() to program the
 * values into the ccdc.
 */
static int vpfe_config_image_format(struct vpfe_device *vpfe,
				    v4l2_std_id std_id)
{
	struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
		if (vpfe_standards[i].std_id & std_id) {
			vpfe->std_info.active_pixels =
					vpfe_standards[i].width;
			vpfe->std_info.active_lines =
					vpfe_standards[i].height;
			vpfe->std_info.frame_format =
					vpfe_standards[i].frame_format;
			vpfe->std_index = i;

			break;
		}
	}

	if (i == ARRAY_SIZE(vpfe_standards)) {
		vpfe_err(vpfe, "standard not supported\n");
		return -EINVAL;
	}

	vpfe->crop.top = vpfe->crop.left = 0;
	vpfe->crop.width = vpfe->std_info.active_pixels;
	vpfe->crop.height = vpfe->std_info.active_lines;
	pix->width = vpfe->crop.width;
	pix->height = vpfe->crop.height;
	pix->pixelformat = V4L2_PIX_FMT_YUYV;

	/* first field and frame format based on standard frame format */
	if (vpfe->std_info.frame_format)
		pix->field = V4L2_FIELD_INTERLACED;
	else
		pix->field = V4L2_FIELD_NONE;

	ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
	if (ret)
		return ret;

	/* Update the crop window based on found values */
	vpfe->crop.width = pix->width;
	vpfe->crop.height = pix->height;

	return vpfe_config_ccdc_image_format(vpfe);
}
static int vpfe_initialize_device(struct vpfe_device *vpfe)
{
	struct vpfe_subdev_info *sdinfo;
	int ret;

	sdinfo = &vpfe->cfg->sub_devs[0];
	sdinfo->sd = vpfe->sd[0];
	vpfe->current_input = 0;
	vpfe->std_index = 0;

	/* Configure the default format information */
	ret = vpfe_config_image_format(vpfe,
				       vpfe_standards[vpfe->std_index].std_id);
	if (ret)
		return ret;

	pm_runtime_get_sync(vpfe->pdev);

	vpfe_config_enable(&vpfe->ccdc, 1);

	vpfe_ccdc_restore_defaults(&vpfe->ccdc);

	/* Clear all VPFE interrupts */
	vpfe_clear_intr(&vpfe->ccdc, -1);

	return ret;
}
/*
 * vpfe_release : This function is based on the vb2_fop_release
 * helper function.
 * It has been augmented to handle module power management,
 * by disabling/enabling the h/w module's functional clock when necessary.
 */
static int vpfe_release(struct file *file)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	bool fh_singular;
	int ret;

	mutex_lock(&vpfe->lock);

	/* Save the singular status before we call the clean-up helper */
	fh_singular = v4l2_fh_is_singular_file(file);

	/* the release helper will cleanup any on-going streaming */
	ret = _vb2_fop_release(file, NULL);

	/* If this was the last open file, de-initialize the hw module */
	if (fh_singular)
		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);

	mutex_unlock(&vpfe->lock);

	return ret;
}
/*
 * vpfe_open : This function is based on the v4l2_fh_open helper function.
 * It has been augmented to handle module power management,
 * by disabling/enabling the h/w module's functional clock when necessary.
 */
static int vpfe_open(struct file *file)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	int ret;

	mutex_lock(&vpfe->lock);

	ret = v4l2_fh_open(file);
	if (ret) {
		vpfe_err(vpfe, "v4l2_fh_open failed\n");
		goto unlock;
	}

	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	if (vpfe_initialize_device(vpfe)) {
		v4l2_fh_release(file);
		ret = -ENODEV;
	}

unlock:
	mutex_unlock(&vpfe->lock);
	return ret;
}
/**
 * vpfe_schedule_next_buffer: set next buffer address for capture
 * @vpfe : ptr to vpfe device
 *
 * This function gets the next buffer from the dma queue and
 * sets its address in the vpfe register for capture.
 * The buffer is marked active.
 *
 * Assumes caller is holding vpfe->dma_queue_lock already
 */
static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
{
	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
				    struct vpfe_cap_buffer, list);
	list_del(&vpfe->next_frm->list);

	vpfe_set_sdr_addr(&vpfe->ccdc,
			  vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0));
}

static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
{
	unsigned long addr;

	addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) +
					vpfe->field_off;

	vpfe_set_sdr_addr(&vpfe->ccdc, addr);
}

/*
 * vpfe_process_buffer_complete: process a completed buffer
 * @vpfe : ptr to vpfe device
 *
 * This function time stamps the buffer and marks it as DONE.  It also
 * wakes up any process waiting on the QUEUE and sets the next buffer
 * as current.
 */
static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
{
	vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
	vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field;
	vpfe->cur_frm->vb.sequence = vpfe->sequence++;
	vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
	vpfe->cur_frm = vpfe->next_frm;
}
/*
 * vpfe_isr : ISR handler for vpfe capture (VINT0)
 * @irq: irq number
 * @dev_id: dev_id ptr
 *
 * It changes the status of the captured buffer, takes the next buffer
 * from the queue and sets its address in the VPFE registers.
 */
static irqreturn_t vpfe_isr(int irq, void *dev)
{
	struct vpfe_device *vpfe = (struct vpfe_device *)dev;
	enum v4l2_field field;
	int intr_status;
	int fid;

	intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);

	if (intr_status & VPFE_VDINT0) {
		field = vpfe->fmt.fmt.pix.field;

		if (field == V4L2_FIELD_NONE) {
			/* handle progressive frame capture */
			if (vpfe->cur_frm != vpfe->next_frm)
				vpfe_process_buffer_complete(vpfe);
			goto next_intr;
		}

		/*
		 * Interlaced or TB capture: check which field
		 * the hardware is in.
		 */
		fid = vpfe_ccdc_getfid(&vpfe->ccdc);

		/* switch the software maintained field id */
		vpfe->field ^= 1;
		if (fid == vpfe->field) {
			/* we are in sync here, continue */
			if (fid == 0) {
				/*
				 * One frame is just being captured. If the
				 * next frame is available, release the
				 * current frame and move on
				 */
				if (vpfe->cur_frm != vpfe->next_frm)
					vpfe_process_buffer_complete(vpfe);
				/*
				 * based on whether the two fields are stored
				 * interleaved or separately in memory,
				 * reconfigure the CCDC memory address
				 */
				if (field == V4L2_FIELD_SEQ_TB)
					vpfe_schedule_bottom_field(vpfe);

				goto next_intr;
			}
			/*
			 * If one field is just being captured, configure
			 * the next frame: take the next frame from the empty
			 * queue; if no frame is available, hold on to the
			 * current buffer.
			 */
			spin_lock(&vpfe->dma_queue_lock);
			if (!list_empty(&vpfe->dma_queue) &&
			    vpfe->cur_frm == vpfe->next_frm)
				vpfe_schedule_next_buffer(vpfe);
			spin_unlock(&vpfe->dma_queue_lock);
		} else if (fid == 0) {
			/*
			 * Out of sync: recover from any hardware
			 * out-of-sync condition.  May lose one frame.
			 */
			vpfe->field = fid;
		}
	}

next_intr:
	if (intr_status & VPFE_VDINT1) {
		spin_lock(&vpfe->dma_queue_lock);
		if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
		    !list_empty(&vpfe->dma_queue) &&
		    vpfe->cur_frm == vpfe->next_frm)
			vpfe_schedule_next_buffer(vpfe);
		spin_unlock(&vpfe->dma_queue_lock);
	}

	vpfe_clear_intr(&vpfe->ccdc, intr_status);

	return IRQ_HANDLED;
}
static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
{
	unsigned int intr = VPFE_VDINT0;
	enum ccdc_frmfmt frame_format;

	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
		intr |= VPFE_VDINT1;

	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
}

static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
{
	unsigned int intr = VPFE_VDINT0;
	enum ccdc_frmfmt frame_format;

	frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
	if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
		intr |= VPFE_VDINT1;

	vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
}

static int vpfe_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_querycap\n");

	strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", vpfe->v4l2_dev.name);
	cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			   V4L2_CAP_READWRITE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}
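
/*
 * Illustrative user-space sketch (not part of the driver): querying the
 * capabilities exposed above through the standard V4L2 ioctl.  The device
 * node name is an assumption.
 *
 *	struct v4l2_capability cap;
 *	int fd = open("/dev/video0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
 *		printf("driver %s on %s\n", cap.driver, cap.bus_info);
 *		// expected: driver "vpfe" on "platform:<v4l2 device name>"
 */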
/* get the format set at output pad of the adjacent subdev */
static int __vpfe_get_format(struct vpfe_device *vpfe,
			     struct v4l2_format *format, unsigned int *bpp)
{
	struct v4l2_mbus_framefmt mbus_fmt;
	struct vpfe_subdev_info *sdinfo;
	struct v4l2_subdev_format fmt;
	int ret;

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 0;

	ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	if (!ret) {
		v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
		mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
	} else {
		ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
						 sdinfo->grp_id,
						 pad, get_fmt,
						 NULL, &fmt);
		if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
			return ret;
		v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
		mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
	}

	format->type = vpfe->fmt.type;

	vpfe_dbg(1, vpfe,
		 "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
		 __func__, format->fmt.pix.width, format->fmt.pix.height,
		 print_fourcc(format->fmt.pix.pixelformat),
		 format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);

	return 0;
}
/* set the format at output pad of the adjacent subdev */
static int __vpfe_set_format(struct vpfe_device *vpfe,
                             struct v4l2_format *format, unsigned int *bpp)
{
        struct vpfe_subdev_info *sdinfo;
        struct v4l2_subdev_format fmt;
        int ret;

        vpfe_dbg(2, vpfe, "__vpfe_set_format\n");

        sdinfo = vpfe->current_subdev;
        if (!sdinfo->sd)
                return -EINVAL;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        fmt.pad = 0;

        pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);

        ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
        if (ret)
                return ret;

        v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
        mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);

        format->type = vpfe->fmt.type;

        vpfe_dbg(1, vpfe,
                 "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
                 __func__, format->fmt.pix.width, format->fmt.pix.height,
                 print_fourcc(format->fmt.pix.pixelformat),
                 format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);

        return 0;
}

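/* VIDIOC_G_FMT handler: return the currently configured capture format */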
static int vpfe_g_fmt(struct file *file, void *priv,
                      struct v4l2_format *fmt)
{
        struct vpfe_device *vpfe = video_drvdata(file);

        vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");

        *fmt = vpfe->fmt;

        return 0;
}

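/*
 * VIDIOC_ENUM_FMT handler: walk the driver's format table and return the
 * entry whose enumeration index matches the one requested by the application.
 */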
static int vpfe_enum_fmt(struct file *file, void *priv,
                         struct v4l2_fmtdesc *f)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct vpfe_subdev_info *sdinfo;
        struct vpfe_fmt *fmt = NULL;
        unsigned int k;

        vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
                 f->index);

        sdinfo = vpfe->current_subdev;
        if (!sdinfo->sd)
                return -EINVAL;

        if (f->index >= ARRAY_SIZE(formats))
                return -EINVAL;

        for (k = 0; k < ARRAY_SIZE(formats); k++) {
                if (formats[k].index == f->index) {
                        fmt = &formats[k];
                        break;
                }
        }
        if (!fmt)
                return -EINVAL;

        strncpy(f->description, fmt->name, sizeof(f->description) - 1);
        f->pixelformat = fmt->fourcc;
        f->type = vpfe->fmt.type;

        vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
                 f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);

        return 0;
}

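/*
 * VIDIOC_TRY_FMT handler: report the format currently active on the
 * connected subdev; the requested format itself is not programmed.
 */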
static int vpfe_try_fmt(struct file *file, void *priv,
                        struct v4l2_format *fmt)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        unsigned int bpp;

        vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");

        return __vpfe_get_format(vpfe, fmt, &bpp);
}

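/*
 * VIDIOC_S_FMT handler: if the requested format differs from what the sensor
 * currently provides, propagate it to the subdev, then update the driver's
 * format, bpp and crop window and reconfigure the CCDC accordingly.
 */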
static int vpfe_s_fmt(struct file *file, void *priv,
                      struct v4l2_format *fmt)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct v4l2_format format;
        unsigned int bpp;
        int ret;

        vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");

        /* If streaming is started, return error */
        if (vb2_is_busy(&vpfe->buffer_queue)) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                return -EBUSY;
        }

        ret = __vpfe_get_format(vpfe, &format, &bpp);
        if (ret)
                return ret;

        if (!cmp_v4l2_format(fmt, &format)) {
                /* Sensor format is different from the requested format
                 * so we need to change it
                 */
                ret = __vpfe_set_format(vpfe, fmt, &bpp);
                if (ret)
                        return ret;
        } else /* Just make sure all of the fields are consistent */
                *fmt = format;

        /* First detach any IRQ if currently attached */
        vpfe_detach_irq(vpfe);
        vpfe->fmt = *fmt;
        vpfe->bpp = bpp;

        /* Update the crop window based on found values */
        vpfe->crop.width = fmt->fmt.pix.width;
        vpfe->crop.height = fmt->fmt.pix.height;

        /* set image capture parameters in the ccdc */
        return vpfe_config_ccdc_image_format(vpfe);
}

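/*
 * VIDIOC_ENUM_FRAMESIZES handler: map the requested pixel format to a media
 * bus code and ask the subdev to enumerate the matching frame sizes.
 */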
static int vpfe_enum_size(struct file *file, void *priv,
                          struct v4l2_frmsizeenum *fsize)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct v4l2_subdev_frame_size_enum fse;
        struct vpfe_subdev_info *sdinfo;
        struct v4l2_mbus_framefmt mbus;
        struct v4l2_pix_format pix;
        struct vpfe_fmt *fmt;
        int ret;

        vpfe_dbg(2, vpfe, "vpfe_enum_size\n");

        /* check for valid format */
        fmt = find_format_by_pix(fsize->pixel_format);
        if (!fmt) {
                vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
                         fsize->pixel_format);
                return -EINVAL;
        }

        memset(fsize->reserved, 0x0, sizeof(fsize->reserved));

        sdinfo = vpfe->current_subdev;
        if (!sdinfo->sd)
                return -EINVAL;

        memset(&pix, 0x0, sizeof(pix));
        /* Construct pix from parameter and use default for the rest */
        pix.pixelformat = fsize->pixel_format;
        pix.width = 640;
        pix.height = 480;
        pix.colorspace = V4L2_COLORSPACE_SRGB;
        pix.field = V4L2_FIELD_NONE;
        pix_to_mbus(vpfe, &pix, &mbus);

        memset(&fse, 0x0, sizeof(fse));
        fse.index = fsize->index;
        fse.pad = 0;
        fse.code = mbus.code;
        fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
        if (ret)
                return -EINVAL;

        vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
                 fse.index, fse.code, fse.min_width, fse.max_width,
                 fse.min_height, fse.max_height);

        fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
        fsize->discrete.width = fse.max_width;
        fsize->discrete.height = fse.max_height;

        vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
                 fsize->index, print_fourcc(fsize->pixel_format),
                 fsize->discrete.width, fsize->discrete.height);

        return 0;
}

/*
 * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
 * given app input index
 */
static int
vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
                            int *subdev_index,
                            int *subdev_input_index,
                            int app_input_index)
{
        int i, j = 0;

        for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
                if (app_input_index < (j + 1)) {
                        *subdev_index = i;
                        *subdev_input_index = app_input_index - j;
                        return 0;
                }
                j++;
        }

        return -EINVAL;
}

/*
 * vpfe_get_app_input_index - Get app input index for a given subdev input index
 * The driver stores the input index of the current sub device and translates
 * it when the application requests the current input.
 */
static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
                                    int *app_input_index)
{
        struct vpfe_config *cfg = vpfe->cfg;
        struct vpfe_subdev_info *sdinfo;
        struct i2c_client *client;
        struct i2c_client *curr_client;
        int i, j = 0;

        curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
        for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
                sdinfo = &cfg->sub_devs[i];
                client = v4l2_get_subdevdata(sdinfo->sd);
                if (client->addr == curr_client->addr &&
                    client->adapter->nr == curr_client->adapter->nr) {
                        if (vpfe->current_input >= 1)
                                return -1;
                        *app_input_index = j + vpfe->current_input;
                        return 0;
                }
                j++;
        }

        return -EINVAL;
}

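/*
 * VIDIOC_ENUM_INPUT handler: translate the application's input index into a
 * (subdev, input) pair and copy back that subdev's input description.
 */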
static int vpfe_enum_input(struct file *file, void *priv,
                           struct v4l2_input *inp)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct vpfe_subdev_info *sdinfo;
        int subdev, index;

        vpfe_dbg(2, vpfe, "vpfe_enum_input\n");

        if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
                                        inp->index) < 0) {
                vpfe_dbg(1, vpfe,
                         "input information not found for the subdev\n");
                return -EINVAL;
        }
        sdinfo = &vpfe->cfg->sub_devs[subdev];
        *inp = sdinfo->inputs[index];

        return 0;
}

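/* VIDIOC_G_INPUT handler: report the application-level index of the current input */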
static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
        struct vpfe_device *vpfe = video_drvdata(file);

        vpfe_dbg(2, vpfe, "vpfe_g_input\n");

        return vpfe_get_app_input_index(vpfe, index);
}

/* Assumes caller is holding vpfe_dev->lock */
static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
{
        int subdev_index = 0, inp_index = 0;
        struct vpfe_subdev_info *sdinfo;
        struct vpfe_route *route;
        u32 input, output;
        int ret;

        vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);

        /* If streaming is started, return error */
        if (vb2_is_busy(&vpfe->buffer_queue)) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                return -EBUSY;
        }
        ret = vpfe_get_subdev_input_index(vpfe,
                                          &subdev_index,
                                          &inp_index,
                                          index);
        if (ret < 0) {
                vpfe_err(vpfe, "invalid input index: %d\n", index);
                goto get_out;
        }

        sdinfo = &vpfe->cfg->sub_devs[subdev_index];
        sdinfo->sd = vpfe->sd[subdev_index];
        route = &sdinfo->routes[inp_index];
        if (route && sdinfo->can_route) {
                input = route->input;
                output = route->output;
                if (sdinfo->sd) {
                        ret = v4l2_subdev_call(sdinfo->sd, video,
                                               s_routing, input, output, 0);
                        if (ret) {
                                vpfe_err(vpfe, "s_routing failed\n");
                                ret = -EINVAL;
                                goto get_out;
                        }
                }
        }

        vpfe->current_subdev = sdinfo;
        if (sdinfo->sd)
                vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
        vpfe->current_input = index;
        vpfe->std_index = 0;

        /* set the bus/interface parameter for the sub device in ccdc */
        ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
        if (ret)
                return ret;

        /* set the default image parameters in the device */
        return vpfe_config_image_format(vpfe,
                                        vpfe_standards[vpfe->std_index].std_id);

get_out:
        return ret;
}

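/* VIDIOC_S_INPUT handler: switch the capture path to the requested input */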
static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
        struct vpfe_device *vpfe = video_drvdata(file);

        vpfe_dbg(2, vpfe,
                 "vpfe_s_input: index: %d\n", index);

        return vpfe_set_input(vpfe, index);
}

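/* VIDIOC_QUERYSTD handler: let the connected decoder detect the video standard */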
static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct vpfe_subdev_info *sdinfo;

        vpfe_dbg(2, vpfe, "vpfe_querystd\n");

        sdinfo = vpfe->current_subdev;
        if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
                return -ENODATA;

        /* Call querystd function of decoder device */
        return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
                                          video, querystd, std_id);
}

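/*
 * VIDIOC_S_STD handler: program the new standard into the decoder and
 * reconfigure the capture image format to match it.
 */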
static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct vpfe_subdev_info *sdinfo;
        int ret;

        vpfe_dbg(2, vpfe, "vpfe_s_std\n");

        sdinfo = vpfe->current_subdev;
        if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
                return -ENODATA;

        /* If streaming is started, return error */
        if (vb2_is_busy(&vpfe->buffer_queue)) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                ret = -EBUSY;
                return ret;
        }

        ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
                                         video, s_std, std_id);
        if (ret < 0) {
                vpfe_err(vpfe, "Failed to set standard\n");
                return ret;
        }
        ret = vpfe_config_image_format(vpfe, std_id);

        return ret;
}

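/* VIDIOC_G_STD handler: return the currently configured video standard */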
static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct vpfe_subdev_info *sdinfo;

        vpfe_dbg(2, vpfe, "vpfe_g_std\n");

        sdinfo = vpfe->current_subdev;
        if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
                return -ENODATA;

        *std_id = vpfe_standards[vpfe->std_index].std_id;

        return 0;
}

/*
 * vpfe_calculate_offsets : This function calculates the buffer offsets
 * for the top and bottom fields
 */
static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
        struct v4l2_rect image_win;

        vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");

        vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
        vpfe->field_off = image_win.height * image_win.width;
}

/*
 * vpfe_queue_setup - Callback function for buffer setup.
 * @vq: vb2_queue ptr
 * @nbuffers: ptr to number of buffers requested by application
 * @nplanes: contains number of distinct video planes needed to hold a frame
 * @sizes[]: contains the size (in bytes) of each plane.
 * @alloc_devs: ptr to allocation context
 *
 * This callback function is called when reqbuf() is called to adjust
 * the buffer count and buffer size
 */
static int vpfe_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
                            unsigned int sizes[], struct device *alloc_devs[])
{
        struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
        unsigned size = vpfe->fmt.fmt.pix.sizeimage;

        if (vq->num_buffers + *nbuffers < 3)
                *nbuffers = 3 - vq->num_buffers;

        if (*nplanes) {
                if (sizes[0] < size)
                        return -EINVAL;
                size = sizes[0];
        }

        *nplanes = 1;
        sizes[0] = size;

        vpfe_dbg(1, vpfe,
                 "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);

        /* Calculate field offset */
        vpfe_calculate_offsets(vpfe);

        return 0;
}

/*
 * vpfe_buffer_prepare : callback function for buffer prepare
 * @vb: ptr to vb2_buffer
 *
 * This is the callback function for buffer prepare when vb2_qbuf()
 * is called. It checks that the plane is large enough for the current
 * format and sets the payload size and field for the buffer.
 */
static int vpfe_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);

        vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);

        if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
                return -EINVAL;

        vbuf->field = vpfe->fmt.fmt.pix.field;

        return 0;
}

/*
 * vpfe_buffer_queue : Callback function to add buffer to DMA queue
 * @vb: ptr to vb2_buffer
 */
static void vpfe_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
        struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf);
        unsigned long flags = 0;

        /* add the buffer to the DMA queue */
        spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
        list_add_tail(&buf->list, &vpfe->dma_queue);
        spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}

/*
 * vpfe_start_streaming : Starts the DMA engine for streaming
 * @vq: ptr to vb2_queue
 * @count: number of buffers
 */
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
        struct vpfe_cap_buffer *buf, *tmp;
        struct vpfe_subdev_info *sdinfo;
        unsigned long flags;
        unsigned long addr;
        int ret;

        spin_lock_irqsave(&vpfe->dma_queue_lock, flags);

        vpfe->field = 0;
        vpfe->sequence = 0;

        sdinfo = vpfe->current_subdev;

        vpfe_attach_irq(vpfe);

        if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
                vpfe_ccdc_config_raw(&vpfe->ccdc);
        else
                vpfe_ccdc_config_ycbcr(&vpfe->ccdc);

        /* Get the next frame from the buffer queue */
        vpfe->next_frm = list_entry(vpfe->dma_queue.next,
                                    struct vpfe_cap_buffer, list);
        vpfe->cur_frm = vpfe->next_frm;
        /* Remove buffer from the buffer queue */
        list_del(&vpfe->cur_frm->list);
        spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);

        addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0);

        vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));

        vpfe_pcr_enable(&vpfe->ccdc, 1);

        ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
        if (ret < 0) {
                vpfe_err(vpfe, "stream on failed in subdev\n");
                goto err;
        }

        return 0;

err:
        list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
        }

        return ret;
}

/*
 * vpfe_stop_streaming : Stop the DMA engine
 * @vq: ptr to vb2_queue
 *
 * This callback stops the DMA engine and any remaining buffers
 * in the DMA queue are released.
 */
static void vpfe_stop_streaming(struct vb2_queue *vq)
{
        struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
        struct vpfe_subdev_info *sdinfo;
        unsigned long flags;
        int ret;

        vpfe_pcr_enable(&vpfe->ccdc, 0);

        vpfe_detach_irq(vpfe);

        sdinfo = vpfe->current_subdev;
        ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
        if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
                vpfe_dbg(1, vpfe, "stream off failed in subdev\n");

        /* release all active buffers */
        spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
        if (vpfe->cur_frm == vpfe->next_frm) {
                vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
                                VB2_BUF_STATE_ERROR);
        } else {
                if (vpfe->cur_frm != NULL)
                        vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
                if (vpfe->next_frm != NULL)
                        vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
                                        VB2_BUF_STATE_ERROR);
        }

        while (!list_empty(&vpfe->dma_queue)) {
                vpfe->next_frm = list_entry(vpfe->dma_queue.next,
                                            struct vpfe_cap_buffer, list);
                list_del(&vpfe->next_frm->list);
                vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf,
                                VB2_BUF_STATE_ERROR);
        }
        spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}

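/*
 * VIDIOC_CROPCAP handler: report the bounds, default rectangle and pixel
 * aspect for the currently selected video standard.
 */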
static int vpfe_cropcap(struct file *file, void *priv,
                        struct v4l2_cropcap *crop)
{
        struct vpfe_device *vpfe = video_drvdata(file);

        vpfe_dbg(2, vpfe, "vpfe_cropcap\n");

        if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
                return -EINVAL;

        memset(crop, 0, sizeof(struct v4l2_cropcap));

        crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        crop->defrect.width = vpfe_standards[vpfe->std_index].width;
        crop->bounds.width = crop->defrect.width;
        crop->defrect.height = vpfe_standards[vpfe->std_index].height;
        crop->bounds.height = crop->defrect.height;
        crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;

        return 0;
}

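/* VIDIOC_G_SELECTION handler: return the crop bounds/default or the active crop rectangle */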
static int
vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
        struct vpfe_device *vpfe = video_drvdata(file);

        switch (s->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
        case V4L2_SEL_TGT_CROP_DEFAULT:
                s->r.left = s->r.top = 0;
                s->r.width = vpfe->crop.width;
                s->r.height = vpfe->crop.height;
                break;

        case V4L2_SEL_TGT_CROP:
                s->r = vpfe->crop;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

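/* Return 1 if rectangle a is fully contained within rectangle b, 0 otherwise */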
static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
{
        if (a->left < b->left || a->top < b->top)
                return 0;

        if (a->left + a->width > b->left + b->width)
                return 0;

        if (a->top + a->height > b->top + b->height)
                return 0;

        return 1;
}

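/*
 * VIDIOC_S_SELECTION handler: bound and align the requested crop rectangle,
 * honour the LE/GE constraint flags, then apply the new window to the CCDC
 * and update the active pixel format.
 */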
static int
vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        struct v4l2_rect cr = vpfe->crop;
        struct v4l2_rect r = s->r;

        /* If streaming is started, return error */
        if (vb2_is_busy(&vpfe->buffer_queue)) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                return -EBUSY;
        }

        if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
            s->target != V4L2_SEL_TGT_CROP)
                return -EINVAL;

        v4l_bound_align_image(&r.width, 0, cr.width, 0,
                              &r.height, 0, cr.height, 0, 0);

        r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
        r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);

        if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
                return -ERANGE;

        if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
                return -ERANGE;

        s->r = vpfe->crop = r;

        vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
        vpfe->fmt.fmt.pix.width = r.width;
        vpfe->fmt.fmt.pix.height = r.height;
        vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
        vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
                                      vpfe->fmt.fmt.pix.height;

        vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
                 r.left, r.top, r.width, r.height, cr.width, cr.height);

        return 0;
}

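/*
 * Custom ioctl handler: currently only VIDIOC_AM437X_CCDC_CFG is handled,
 * which pushes user-supplied CCDC parameters into the hardware and refreshes
 * the driver's notion of the image format.
 */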
static long vpfe_ioctl_default(struct file *file, void *priv,
                               bool valid_prio, unsigned int cmd, void *param)
{
        struct vpfe_device *vpfe = video_drvdata(file);
        int ret;

        vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");

        if (!valid_prio) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                return -EBUSY;
        }

        /* If streaming is started, return error */
        if (vb2_is_busy(&vpfe->buffer_queue)) {
                vpfe_err(vpfe, "%s device busy\n", __func__);
                return -EBUSY;
        }

        switch (cmd) {
        case VIDIOC_AM437X_CCDC_CFG:
                ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
                if (ret) {
                        vpfe_dbg(2, vpfe,
                                 "Error setting parameters in CCDC\n");
                        return ret;
                }
                ret = vpfe_get_ccdc_image_format(vpfe,
                                                 &vpfe->fmt);
                if (ret < 0) {
                        vpfe_dbg(2, vpfe,
                                 "Invalid image format at CCDC\n");
                        return ret;
                }
                break;

        default:
                ret = -ENOTTY;
                break;
        }

        return ret;
}

static const struct vb2_ops vpfe_video_qops = {
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .queue_setup = vpfe_queue_setup,
        .buf_prepare = vpfe_buffer_prepare,
        .buf_queue = vpfe_buffer_queue,
        .start_streaming = vpfe_start_streaming,
        .stop_streaming = vpfe_stop_streaming,
};

/* vpfe capture driver file operations */
static const struct v4l2_file_operations vpfe_fops = {
        .owner = THIS_MODULE,
        .open = vpfe_open,
        .release = vpfe_release,
        .read = vb2_fop_read,
        .poll = vb2_fop_poll,
        .unlocked_ioctl = video_ioctl2,
        .mmap = vb2_fop_mmap,
};

/* vpfe capture ioctl operations */
static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
        .vidioc_querycap = vpfe_querycap,
        .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
        .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
        .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
        .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
        .vidioc_enum_framesizes = vpfe_enum_size,
        .vidioc_enum_input = vpfe_enum_input,
        .vidioc_g_input = vpfe_g_input,
        .vidioc_s_input = vpfe_s_input,
        .vidioc_querystd = vpfe_querystd,
        .vidioc_s_std = vpfe_s_std,
        .vidioc_g_std = vpfe_g_std,
        .vidioc_reqbufs = vb2_ioctl_reqbufs,
        .vidioc_create_bufs = vb2_ioctl_create_bufs,
        .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
        .vidioc_querybuf = vb2_ioctl_querybuf,
        .vidioc_qbuf = vb2_ioctl_qbuf,
        .vidioc_dqbuf = vb2_ioctl_dqbuf,
        .vidioc_expbuf = vb2_ioctl_expbuf,
        .vidioc_streamon = vb2_ioctl_streamon,
        .vidioc_streamoff = vb2_ioctl_streamoff,
        .vidioc_log_status = v4l2_ctrl_log_status,
        .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
        .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
        .vidioc_cropcap = vpfe_cropcap,
        .vidioc_g_selection = vpfe_g_selection,
        .vidioc_s_selection = vpfe_s_selection,
        .vidioc_default = vpfe_ioctl_default,
};

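/*
 * Async notifier .bound callback: match the newly bound subdev against the
 * configured async subdevs, record it, and enumerate the media bus codes it
 * supports so the driver's format table can be marked accordingly.
 */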
static int
vpfe_async_bound(struct v4l2_async_notifier *notifier,
                 struct v4l2_subdev *subdev,
                 struct v4l2_async_subdev *asd)
{
        struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
                                                struct vpfe_device, v4l2_dev);
        struct v4l2_subdev_mbus_code_enum mbus_code;
        struct vpfe_subdev_info *sdinfo;
        bool found = false;
        int i, j;

        vpfe_dbg(1, vpfe, "vpfe_async_bound\n");

        for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
                if (vpfe->cfg->asd[i]->match.fwnode ==
                    asd->match.fwnode) {
                        sdinfo = &vpfe->cfg->sub_devs[i];
                        vpfe->sd[i] = subdev;
                        vpfe->sd[i]->grp_id = sdinfo->grp_id;
                        found = true;
                        break;
                }
        }

        if (!found) {
                vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
                return -EINVAL;
        }

        vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;

        /* setup the supported formats & indexes */
        for (j = 0, i = 0; ; ++j) {
                struct vpfe_fmt *fmt;
                int ret;

                memset(&mbus_code, 0, sizeof(mbus_code));
                mbus_code.index = j;
                mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
                ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
                                       NULL, &mbus_code);
                if (ret)
                        break;

                fmt = find_format_by_code(mbus_code.code);
                if (!fmt)
                        continue;

                fmt->supported = true;
                fmt->index = i++;
        }

        return 0;
}

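/*
 * Final part of the probe sequence, run once all subdevs are bound: pick the
 * first subdev as the current input, initialize the vb2 queue and register
 * the video device.
 */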
static int vpfe_probe_complete(struct vpfe_device *vpfe)
{
        struct video_device *vdev;
        struct vb2_queue *q;
        int err;

        spin_lock_init(&vpfe->dma_queue_lock);
        mutex_init(&vpfe->lock);

        vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

        /* set first sub device as current one */
        vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
        vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;

        err = vpfe_set_input(vpfe, 0);
        if (err)
                goto probe_out;

        /* Initialize videobuf2 queue as per the buffer type */
        q = &vpfe->buffer_queue;
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
        q->drv_priv = vpfe;
        q->ops = &vpfe_video_qops;
        q->mem_ops = &vb2_dma_contig_memops;
        q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->lock = &vpfe->lock;
        q->min_buffers_needed = 1;
        q->dev = vpfe->pdev;

        err = vb2_queue_init(q);
        if (err) {
                vpfe_err(vpfe, "vb2_queue_init() failed\n");
                goto probe_out;
        }

        INIT_LIST_HEAD(&vpfe->dma_queue);

        vdev = &vpfe->video_dev;
        strscpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
        vdev->release = video_device_release_empty;
        vdev->fops = &vpfe_fops;
        vdev->ioctl_ops = &vpfe_ioctl_ops;
        vdev->v4l2_dev = &vpfe->v4l2_dev;
        vdev->vfl_dir = VFL_DIR_RX;
        vdev->queue = q;
        vdev->lock = &vpfe->lock;
        video_set_drvdata(vdev, vpfe);
        err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
        if (err) {
                vpfe_err(vpfe,
                         "Unable to register video device.\n");
                goto probe_out;
        }

        return 0;

probe_out:
        v4l2_device_unregister(&vpfe->v4l2_dev);
        return err;
}

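/* Async notifier .complete callback: all subdevs are bound, finish probing */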
static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
{
        struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
                                                struct vpfe_device, v4l2_dev);

        return vpfe_probe_complete(vpfe);
}

static const struct v4l2_async_notifier_operations vpfe_async_ops = {
        .bound = vpfe_async_bound,
        .complete = vpfe_async_complete,
};

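/*
 * Build the platform data from the device tree: walk the endpoints of the
 * VPFE node, parse each parallel bus configuration and register the remote
 * sensor/decoder as an async subdev to wait for.
 */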
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
        struct device_node *endpoint = NULL;
        struct v4l2_fwnode_endpoint bus_cfg;
        struct vpfe_subdev_info *sdinfo;
        struct vpfe_config *pdata;
        unsigned int flags;
        unsigned int i;
        int err;

        dev_dbg(&pdev->dev, "vpfe_get_pdata\n");

        if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
                return pdev->dev.platform_data;

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return NULL;

        for (i = 0; ; i++) {
                struct device_node *rem;

                endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
                                                      endpoint);
                if (!endpoint)
                        break;

                sdinfo = &pdata->sub_devs[i];
                sdinfo->grp_id = 0;

                /* we only support camera */
                sdinfo->inputs[0].index = i;
                strscpy(sdinfo->inputs[0].name, "Camera",
                        sizeof(sdinfo->inputs[0].name));
                sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
                sdinfo->inputs[0].std = V4L2_STD_ALL;
                sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;

                sdinfo->can_route = 0;
                sdinfo->routes = NULL;

                of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
                                     &sdinfo->vpfe_param.if_type);
                if (sdinfo->vpfe_param.if_type < 0 ||
                    sdinfo->vpfe_param.if_type > 4) {
                        sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
                }

                err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
                                                 &bus_cfg);
                if (err) {
                        dev_err(&pdev->dev, "Could not parse the endpoint\n");
                        goto done;
                }

                sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;

                if (sdinfo->vpfe_param.bus_width < 8 ||
                    sdinfo->vpfe_param.bus_width > 16) {
                        dev_err(&pdev->dev, "Invalid bus width.\n");
                        goto done;
                }

                flags = bus_cfg.bus.parallel.flags;

                if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
                        sdinfo->vpfe_param.hdpol = 1;

                if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
                        sdinfo->vpfe_param.vdpol = 1;

                rem = of_graph_get_remote_port_parent(endpoint);
                if (!rem) {
                        dev_err(&pdev->dev, "Remote device at %pOF not found\n",
                                endpoint);
                        goto done;
                }

                pdata->asd[i] = devm_kzalloc(&pdev->dev,
                                             sizeof(struct v4l2_async_subdev),
                                             GFP_KERNEL);
                if (!pdata->asd[i]) {
                        of_node_put(rem);
                        pdata = NULL;
                        goto done;
                }

                pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_FWNODE;
                pdata->asd[i]->match.fwnode = of_fwnode_handle(rem);
                of_node_put(rem);
        }

        of_node_put(endpoint);
        return pdata;

done:
        of_node_put(endpoint);
        return NULL;
}

/*
 * vpfe_probe : This function registers the device with the V4L2
 * framework and initializes the fields of each device object
 */
static int vpfe_probe(struct platform_device *pdev)
{
        struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
        struct vpfe_device *vpfe;
        struct vpfe_ccdc *ccdc;
        struct resource *res;
        int ret;

        if (!vpfe_cfg) {
                dev_err(&pdev->dev, "No platform data\n");
                return -EINVAL;
        }

        vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
        if (!vpfe)
                return -ENOMEM;

        vpfe->pdev = &pdev->dev;
        vpfe->cfg = vpfe_cfg;
        ccdc = &vpfe->ccdc;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ccdc->ccdc_cfg.base_addr))
                return PTR_ERR(ccdc->ccdc_cfg.base_addr);

        ret = platform_get_irq(pdev, 0);
        if (ret <= 0) {
                dev_err(&pdev->dev, "No IRQ resource\n");
                return -ENODEV;
        }
        vpfe->irq = ret;

        ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
                               "vpfe_capture0", vpfe);
        if (ret) {
                dev_err(&pdev->dev, "Unable to request interrupt\n");
                return -EINVAL;
        }

        ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
        if (ret) {
                vpfe_err(vpfe,
                         "Unable to register v4l2 device.\n");
                return ret;
        }

        /* set the driver data in platform device */
        platform_set_drvdata(pdev, vpfe);
        /* Enabling module functional clock */
        pm_runtime_enable(&pdev->dev);

        /* for now just enable it here instead of waiting for the open */
        pm_runtime_get_sync(&pdev->dev);

        vpfe_ccdc_config_defaults(ccdc);

        pm_runtime_put_sync(&pdev->dev);

        vpfe->sd = devm_kcalloc(&pdev->dev,
                                ARRAY_SIZE(vpfe->cfg->asd),
                                sizeof(struct v4l2_subdev *),
                                GFP_KERNEL);
        if (!vpfe->sd) {
                ret = -ENOMEM;
                goto probe_out_v4l2_unregister;
        }

        vpfe->notifier.subdevs = vpfe->cfg->asd;
        vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
        vpfe->notifier.ops = &vpfe_async_ops;
        ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
                                           &vpfe->notifier);
        if (ret) {
                vpfe_err(vpfe, "Error registering async notifier\n");
                ret = -EINVAL;
                goto probe_out_v4l2_unregister;
        }

        return 0;

probe_out_v4l2_unregister:
        v4l2_device_unregister(&vpfe->v4l2_dev);
        return ret;
}

/*
 * vpfe_remove : This function un-registers the device from the V4L2 framework
 */
static int vpfe_remove(struct platform_device *pdev)
{
        struct vpfe_device *vpfe = platform_get_drvdata(pdev);

        vpfe_dbg(2, vpfe, "vpfe_remove\n");

        pm_runtime_disable(&pdev->dev);

        v4l2_async_notifier_unregister(&vpfe->notifier);
        v4l2_device_unregister(&vpfe->v4l2_dev);
        video_unregister_device(&vpfe->video_dev);

        return 0;
}

#ifdef CONFIG_PM_SLEEP
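/* Save the CCDC register context before the functional clock is released */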
static void vpfe_save_context(struct vpfe_ccdc *ccdc)
{
        ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
        ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
        ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
        ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
        ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
        ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
        ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
        ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
        ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
        ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
        ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
        ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
        ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
        ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
                                                            VPFE_HD_VD_WID);
        ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
                                                            VPFE_PIX_LINES);
        ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
                                                            VPFE_HORZ_INFO);
        ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
                                                             VPFE_VERT_START);
        ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
                                                             VPFE_VERT_LINES);
        ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
                                                            VPFE_HSIZE_OFF);
}

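/*
 * System suspend handler: if streaming was active, save the CCDC context,
 * disable the CCDC and release the functional clocks before selecting the
 * sleep pin state.
 */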
static int vpfe_suspend(struct device *dev)
{
        struct vpfe_device *vpfe = dev_get_drvdata(dev);
        struct vpfe_ccdc *ccdc = &vpfe->ccdc;

        /* if streaming has not started we don't care */
        if (!vb2_start_streaming_called(&vpfe->buffer_queue))
                return 0;

        pm_runtime_get_sync(dev);
        vpfe_config_enable(ccdc, 1);

        /* Save VPFE context */
        vpfe_save_context(ccdc);

        /* Disable CCDC */
        vpfe_pcr_enable(ccdc, 0);
        vpfe_config_enable(ccdc, 0);

        /* Disable both master and slave clock */
        pm_runtime_put_sync(dev);

        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);

        return 0;
}

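/* Restore the CCDC register context saved by vpfe_save_context() */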
static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
{
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
                       VPFE_HD_VD_WID);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
                       VPFE_PIX_LINES);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
                       VPFE_HORZ_INFO);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
                       VPFE_VERT_START);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
                       VPFE_VERT_LINES);
        vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
                       VPFE_HSIZE_OFF);
}

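/*
 * System resume handler: if streaming was active, re-enable the clocks,
 * restore the saved CCDC context and select the default pin state.
 */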
static int vpfe_resume(struct device *dev)
{
        struct vpfe_device *vpfe = dev_get_drvdata(dev);
        struct vpfe_ccdc *ccdc = &vpfe->ccdc;

        /* if streaming has not started we don't care */
        if (!vb2_start_streaming_called(&vpfe->buffer_queue))
                return 0;

        /* Enable both master and slave clock */
        pm_runtime_get_sync(dev);
        vpfe_config_enable(ccdc, 1);

        /* Restore VPFE context */
        vpfe_restore_context(ccdc);

        vpfe_config_enable(ccdc, 0);
        pm_runtime_put_sync(dev);

        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);

        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);

static const struct of_device_id vpfe_of_match[] = {
        { .compatible = "ti,am437x-vpfe", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, vpfe_of_match);

static struct platform_driver vpfe_driver = {
        .probe = vpfe_probe,
        .remove = vpfe_remove,
        .driver = {
                .name = VPFE_MODULE_NAME,
                .pm = &vpfe_pm_ops,
                .of_match_table = of_match_ptr(vpfe_of_match),
        },
};

module_platform_driver(vpfe_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TI AM437x VPFE driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VPFE_VERSION);