am437x-vpfe.c

  1. /*
  2. * TI VPFE capture Driver
  3. *
  4. * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
  5. *
  6. * Benoit Parrot <bparrot@ti.com>
  7. * Lad, Prabhakar <prabhakar.csengg@gmail.com>
  8. *
  9. * This program is free software; you may redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  14. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  15. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  16. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  17. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  18. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  19. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  20. * SOFTWARE.
  21. */
  22. #include <linux/delay.h>
  23. #include <linux/err.h>
  24. #include <linux/init.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/pinctrl/consumer.h>
  29. #include <linux/platform_device.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/slab.h>
  32. #include <linux/uaccess.h>
  33. #include <linux/videodev2.h>
  34. #include <media/v4l2-common.h>
  35. #include <media/v4l2-ctrls.h>
  36. #include <media/v4l2-event.h>
  37. #include <media/v4l2-of.h>
  38. #include "am437x-vpfe.h"
  39. #define VPFE_MODULE_NAME "vpfe"
  40. #define VPFE_VERSION "0.1.0"
  41. static int debug;
  42. module_param(debug, int, 0644);
  43. MODULE_PARM_DESC(debug, "Debug level 0-8");
  44. #define vpfe_dbg(level, dev, fmt, arg...) \
  45. v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
  46. #define vpfe_info(dev, fmt, arg...) \
  47. v4l2_info(&dev->v4l2_dev, fmt, ##arg)
  48. #define vpfe_err(dev, fmt, arg...) \
  49. v4l2_err(&dev->v4l2_dev, fmt, ##arg)
  50. /* standard information */
  51. struct vpfe_standard {
  52. v4l2_std_id std_id;
  53. unsigned int width;
  54. unsigned int height;
  55. struct v4l2_fract pixelaspect;
  56. int frame_format;
  57. };
  58. static const struct vpfe_standard vpfe_standards[] = {
  59. {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
  60. {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
  61. };
  62. struct bus_format {
  63. unsigned int width;
  64. unsigned int bpp;
  65. };
  66. /*
  67. * struct vpfe_fmt - VPFE media bus format information
  68. * @name: V4L2 format description
  69. * @fourcc: V4L2 pixel format (FOURCC) identifier
  70. * @code: V4L2 media bus format code
  71. * @l: bus width and bytes per pixel used when the bus carries 10-bit data
  72. * @s: bus width and bytes per pixel used when the bus carries 8-bit data
  73. * @supported: indicates whether the connected subdev supports this format
  74. * @index: format index reported through VIDIOC_ENUM_FMT
  75. * One entry describes each pixel format the CCDC can write to memory.
  76. */
  77. struct vpfe_fmt {
  78. const char *name;
  79. u32 fourcc;
  80. u32 code;
  81. struct bus_format l;
  82. struct bus_format s;
  83. bool supported;
  84. u32 index;
  85. };
  86. static struct vpfe_fmt formats[] = {
  87. {
  88. .name = "YUV 4:2:2 packed, YCbYCr",
  89. .fourcc = V4L2_PIX_FMT_YUYV,
  90. .code = MEDIA_BUS_FMT_YUYV8_2X8,
  91. .l.width = 10,
  92. .l.bpp = 4,
  93. .s.width = 8,
  94. .s.bpp = 2,
  95. .supported = false,
  96. }, {
  97. .name = "YUV 4:2:2 packed, CbYCrY",
  98. .fourcc = V4L2_PIX_FMT_UYVY,
  99. .code = MEDIA_BUS_FMT_UYVY8_2X8,
  100. .l.width = 10,
  101. .l.bpp = 4,
  102. .s.width = 8,
  103. .s.bpp = 2,
  104. .supported = false,
  105. }, {
  106. .name = "YUV 4:2:2 packed, YCrYCb",
  107. .fourcc = V4L2_PIX_FMT_YVYU,
  108. .code = MEDIA_BUS_FMT_YVYU8_2X8,
  109. .l.width = 10,
  110. .l.bpp = 4,
  111. .s.width = 8,
  112. .s.bpp = 2,
  113. .supported = false,
  114. }, {
  115. .name = "YUV 4:2:2 packed, CrYCbY",
  116. .fourcc = V4L2_PIX_FMT_VYUY,
  117. .code = MEDIA_BUS_FMT_VYUY8_2X8,
  118. .l.width = 10,
  119. .l.bpp = 4,
  120. .s.width = 8,
  121. .s.bpp = 2,
  122. .supported = false,
  123. }, {
  124. .name = "RAW8 BGGR",
  125. .fourcc = V4L2_PIX_FMT_SBGGR8,
  126. .code = MEDIA_BUS_FMT_SBGGR8_1X8,
  127. .l.width = 10,
  128. .l.bpp = 2,
  129. .s.width = 8,
  130. .s.bpp = 1,
  131. .supported = false,
  132. }, {
  133. .name = "RAW8 GBRG",
  134. .fourcc = V4L2_PIX_FMT_SGBRG8,
  135. .code = MEDIA_BUS_FMT_SGBRG8_1X8,
  136. .l.width = 10,
  137. .l.bpp = 2,
  138. .s.width = 8,
  139. .s.bpp = 1,
  140. .supported = false,
  141. }, {
  142. .name = "RAW8 GRBG",
  143. .fourcc = V4L2_PIX_FMT_SGRBG8,
  144. .code = MEDIA_BUS_FMT_SGRBG8_1X8,
  145. .l.width = 10,
  146. .l.bpp = 2,
  147. .s.width = 8,
  148. .s.bpp = 1,
  149. .supported = false,
  150. }, {
  151. .name = "RAW8 RGGB",
  152. .fourcc = V4L2_PIX_FMT_SRGGB8,
  153. .code = MEDIA_BUS_FMT_SRGGB8_1X8,
  154. .l.width = 10,
  155. .l.bpp = 2,
  156. .s.width = 8,
  157. .s.bpp = 1,
  158. .supported = false,
  159. }, {
  160. .name = "RGB565 (LE)",
  161. .fourcc = V4L2_PIX_FMT_RGB565,
  162. .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
  163. .l.width = 10,
  164. .l.bpp = 4,
  165. .s.width = 8,
  166. .s.bpp = 2,
  167. .supported = false,
  168. }, {
  169. .name = "RGB565 (BE)",
  170. .fourcc = V4L2_PIX_FMT_RGB565X,
  171. .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
  172. .l.width = 10,
  173. .l.bpp = 4,
  174. .s.width = 8,
  175. .s.bpp = 2,
  176. .supported = false,
  177. },
  178. };
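/*
 * Note on the table above: every entry starts with .supported = false; the
 * flag and the .index value are expected to be filled in elsewhere in the
 * driver once the connected subdev's media bus codes have been enumerated.
 * mbus_to_pix() below picks between the .l and .s bus_format pair depending
 * on whether the subdev bus is 10 or 8 bits wide.
 */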
  179. static int
  180. __vpfe_get_format(struct vpfe_device *vpfe,
  181. struct v4l2_format *format, unsigned int *bpp);
  182. static struct vpfe_fmt *find_format_by_code(unsigned int code)
  183. {
  184. struct vpfe_fmt *fmt;
  185. unsigned int k;
  186. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  187. fmt = &formats[k];
  188. if (fmt->code == code)
  189. return fmt;
  190. }
  191. return NULL;
  192. }
  193. static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
  194. {
  195. struct vpfe_fmt *fmt;
  196. unsigned int k;
  197. for (k = 0; k < ARRAY_SIZE(formats); k++) {
  198. fmt = &formats[k];
  199. if (fmt->fourcc == pixelformat)
  200. return fmt;
  201. }
  202. return NULL;
  203. }
  204. static void
  205. mbus_to_pix(struct vpfe_device *vpfe,
  206. const struct v4l2_mbus_framefmt *mbus,
  207. struct v4l2_pix_format *pix, unsigned int *bpp)
  208. {
  209. struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
  210. unsigned int bus_width = sdinfo->vpfe_param.bus_width;
  211. struct vpfe_fmt *fmt;
  212. fmt = find_format_by_code(mbus->code);
  213. if (WARN_ON(fmt == NULL)) {
  214. pr_err("Invalid mbus code set\n");
  215. *bpp = 1;
  216. return;
  217. }
  218. memset(pix, 0, sizeof(*pix));
  219. v4l2_fill_pix_format(pix, mbus);
  220. pix->pixelformat = fmt->fourcc;
  221. *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
  222. /* pitch should be 32-byte aligned */
  223. pix->bytesperline = ALIGN(pix->width * *bpp, 32);
  224. pix->sizeimage = pix->bytesperline * pix->height;
  225. }
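/*
 * Example (for illustration only): a 1280x720 YUYV frame on an 8-bit bus
 * selects fmt->s.bpp = 2, so bytesperline = ALIGN(1280 * 2, 32) = 2560 and
 * sizeimage = 2560 * 720 = 1843200 bytes. On a 10-bit bus the same mbus
 * code selects fmt->l.bpp = 4 and the line length doubles.
 */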
  226. static void pix_to_mbus(struct vpfe_device *vpfe,
  227. struct v4l2_pix_format *pix_fmt,
  228. struct v4l2_mbus_framefmt *mbus_fmt)
  229. {
  230. struct vpfe_fmt *fmt;
  231. fmt = find_format_by_pix(pix_fmt->pixelformat);
  232. if (!fmt) {
  233. /* default to first entry */
  234. vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
  235. pix_fmt->pixelformat);
  236. fmt = &formats[0];
  237. }
  238. memset(mbus_fmt, 0, sizeof(*mbus_fmt));
  239. v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
  240. }
  241. /* Print Four-character-code (FOURCC) */
  242. static char *print_fourcc(u32 fmt)
  243. {
  244. static char code[5];
  245. code[0] = (unsigned char)(fmt & 0xff);
  246. code[1] = (unsigned char)((fmt >> 8) & 0xff);
  247. code[2] = (unsigned char)((fmt >> 16) & 0xff);
  248. code[3] = (unsigned char)((fmt >> 24) & 0xff);
  249. code[4] = '\0';
  250. return code;
  251. }
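/*
 * print_fourcc() decodes the four bytes of a pixel format code into a
 * printable string, e.g. V4L2_PIX_FMT_YUYV -> "YUYV". It returns a pointer
 * to a static buffer, so it is only intended for the debug/error messages
 * in this file and is not reentrant.
 */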
  252. static int
  253. cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
  254. {
  255. return lhs->type == rhs->type &&
  256. lhs->fmt.pix.width == rhs->fmt.pix.width &&
  257. lhs->fmt.pix.height == rhs->fmt.pix.height &&
  258. lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
  259. lhs->fmt.pix.field == rhs->fmt.pix.field &&
  260. lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
  261. lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
  262. lhs->fmt.pix.quantization == rhs->fmt.pix.quantization;
  263. }
  264. static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
  265. {
  266. return ioread32(ccdc->ccdc_cfg.base_addr + offset);
  267. }
  268. static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
  269. {
  270. iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
  271. }
  272. static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
  273. {
  274. return container_of(ccdc, struct vpfe_device, ccdc);
  275. }
  276. static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb)
  277. {
  278. return container_of(vb, struct vpfe_cap_buffer, vb);
  279. }
  280. static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
  281. {
  282. vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
  283. }
  284. static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
  285. {
  286. unsigned int cfg;
  287. if (!flag) {
  288. cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
  289. cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
  290. } else {
  291. cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
  292. }
  293. vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
  294. }
  295. static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
  296. struct v4l2_rect *image_win,
  297. enum ccdc_frmfmt frm_fmt,
  298. int bpp)
  299. {
  300. int horz_start, horz_nr_pixels;
  301. int vert_start, vert_nr_lines;
  302. int val, mid_img;
  303. /*
  304. * ppc - per pixel count. Indicates how many cells each pixel occupies
  305. * when output to SDRAM. For example, for ycbcr it is one y and one c,
  306. * so 2; for raw capture this is 1.
  307. */
  308. horz_start = image_win->left * bpp;
  309. horz_nr_pixels = (image_win->width * bpp) - 1;
  310. vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
  311. horz_nr_pixels, VPFE_HORZ_INFO);
  312. vert_start = image_win->top;
  313. if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
  314. vert_nr_lines = (image_win->height >> 1) - 1;
  315. vert_start >>= 1;
  316. /* Since first line doesn't have any data */
  317. vert_start += 1;
  318. /* configure VDINT0 */
  319. val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
  320. } else {
  321. /* Since first line doesn't have any data */
  322. vert_start += 1;
  323. vert_nr_lines = image_win->height - 1;
  324. /*
  325. * configure VDINT0 and VDINT1. VDINT1 will be at half
  326. * of image height
  327. */
  328. mid_img = vert_start + (image_win->height / 2);
  329. val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
  330. (mid_img & VPFE_VDINT_VDINT1_MASK);
  331. }
  332. vpfe_reg_write(ccdc, val, VPFE_VDINT);
  333. vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
  334. vert_start, VPFE_VERT_START);
  335. vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
  336. }
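/*
 * Worked example for vpfe_ccdc_setwin(): a 720x480 interlaced window at
 * (0,0) with bpp = 2 gives horz_start = 0, horz_nr_pixels = 720 * 2 - 1 =
 * 1439, vert_start = 1 and vert_nr_lines = (480 >> 1) - 1 = 239, i.e. the
 * horizontal counters are programmed in bus cycles (pixels * bpp) and the
 * vertical counters in lines per field, both minus one.
 */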
  337. static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
  338. {
  339. struct vpfe_device *vpfe = to_vpfe(ccdc);
  340. vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
  341. vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
  342. vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
  343. vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
  344. vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
  345. vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
  346. vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
  347. vpfe_reg_read(ccdc, VPFE_SYNMODE));
  348. vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
  349. vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
  350. vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
  351. vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
  352. vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
  353. vpfe_reg_read(ccdc, VPFE_VERT_START));
  354. vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
  355. vpfe_reg_read(ccdc, VPFE_VERT_LINES));
  356. }
  357. static int
  358. vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
  359. struct vpfe_ccdc_config_params_raw *ccdcparam)
  360. {
  361. struct vpfe_device *vpfe = to_vpfe(ccdc);
  362. u8 max_gamma, max_data;
  363. if (!ccdcparam->alaw.enable)
  364. return 0;
  365. max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
  366. max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
  367. if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
  368. ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
  369. max_gamma > max_data) {
  370. vpfe_dbg(1, vpfe, "Invalid data line select\n");
  371. return -EINVAL;
  372. }
  373. return 0;
  374. }
  375. static void
  376. vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
  377. struct vpfe_ccdc_config_params_raw *raw_params)
  378. {
  379. struct vpfe_ccdc_config_params_raw *config_params =
  380. &ccdc->ccdc_cfg.bayer.config_params;
  381. *config_params = *raw_params;
  382. }
  383. /*
  384. * vpfe_ccdc_restore_defaults()
  385. * This function will write defaults to all CCDC registers
  386. */
  387. static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
  388. {
  389. int i;
  390. /* Disable CCDC */
  391. vpfe_pcr_enable(ccdc, 0);
  392. /* set all registers to default value */
  393. for (i = 4; i <= 0x94; i += 4)
  394. vpfe_reg_write(ccdc, 0, i);
  395. vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
  396. vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
  397. }
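/*
 * vpfe_ccdc_restore_defaults() stops the CCDC and sweeps the register block
 * (offsets 0x04-0x94) back to zero, then re-applies the two registers that
 * need non-zero defaults: CULLING (no culling) and ALAW (gamma window bits
 * 11-2).
 */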
  398. static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
  399. {
  400. int dma_cntl, i, pcr;
  401. /* If the CCDC module is still busy wait for it to be done */
  402. for (i = 0; i < 10; i++) {
  403. usleep_range(5000, 6000);
  404. pcr = vpfe_reg_read(ccdc, VPFE_PCR);
  405. if (!pcr)
  406. break;
  407. /* make sure it is disabled */
  408. vpfe_pcr_enable(ccdc, 0);
  409. }
  410. /* Disable CCDC by resetting all registers to default POR values */
  411. vpfe_ccdc_restore_defaults(ccdc);
  412. /* If the DMA_CNTL overflow bit is set, clear it.
  413. * It appears to take a while (~20 ms) for this to become quiescent.
  414. */
  415. for (i = 0; i < 10; i++) {
  416. dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
  417. if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
  418. break;
  419. /* Clear the overflow bit */
  420. vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
  421. usleep_range(5000, 6000);
  422. }
  423. /* Disable the module at the CONFIG level */
  424. vpfe_config_enable(ccdc, 0);
  425. pm_runtime_put_sync(dev);
  426. return 0;
  427. }
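/*
 * vpfe_ccdc_close() is the power-down path: poll PCR until the CCDC has
 * finished the current frame (up to ~50-60 ms), reset every register to its
 * POR value, clear any lingering DMA_CNTL overflow condition, disable the
 * module at the CONFIG level and finally drop the runtime PM reference taken
 * in vpfe_initialize_device().
 */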
  428. static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
  429. {
  430. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  431. struct vpfe_ccdc_config_params_raw raw_params;
  432. int x;
  433. if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
  434. return -EINVAL;
  435. x = copy_from_user(&raw_params, params, sizeof(raw_params));
  436. if (x) {
  437. vpfe_dbg(1, vpfe,
  438. "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
  439. x);
  440. return -EFAULT;
  441. }
  442. if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
  443. vpfe_ccdc_update_raw_params(ccdc, &raw_params);
  444. return 0;
  445. }
  446. return -EINVAL;
  447. }
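/*
 * vpfe_ccdc_set_params() is only valid for the raw Bayer interface: it
 * copies a vpfe_ccdc_config_params_raw block from user space, validates the
 * A-law gamma/data-size combination and, on success, stores it as the new
 * raw capture configuration.
 */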
  448. /*
  449. * vpfe_ccdc_config_ycbcr()
  450. * This function will configure CCDC for YCbCr video capture
  451. */
  452. static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
  453. {
  454. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  455. struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
  456. u32 syn_mode;
  457. vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
  458. /*
  459. * first restore the CCDC registers to default values
  460. * This is important since we assume default values to be set in
  461. * a lot of registers that we didn't touch
  462. */
  463. vpfe_ccdc_restore_defaults(ccdc);
  464. /*
  465. * configure pixel format, frame format, configure video frame
  466. * format, enable output to SDRAM, enable internal timing generator
  467. * and 8bit pack mode
  468. */
  469. syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
  470. VPFE_SYN_MODE_INPMOD_SHIFT) |
  471. ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
  472. VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
  473. VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
  474. /* setup BT.656 sync mode */
  475. if (params->bt656_enable) {
  476. vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
  477. /*
  478. * configure the FID, VD, HD pin polarity,
  479. * fld,hd pol positive, vd negative, 8-bit data
  480. */
  481. syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
  482. if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
  483. syn_mode |= VPFE_SYN_MODE_10BITS;
  484. else
  485. syn_mode |= VPFE_SYN_MODE_8BITS;
  486. } else {
  487. /* y/c external sync mode */
  488. syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
  489. VPFE_FID_POL_SHIFT) |
  490. ((params->hd_pol & VPFE_HD_POL_MASK) <<
  491. VPFE_HD_POL_SHIFT) |
  492. ((params->vd_pol & VPFE_VD_POL_MASK) <<
  493. VPFE_VD_POL_SHIFT));
  494. }
  495. vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
  496. /* configure video window */
  497. vpfe_ccdc_setwin(ccdc, &params->win,
  498. params->frm_fmt, params->bytesperpixel);
  499. /*
  500. * configure the order of y cb cr in SDRAM, and disable latch
  501. * internal register on vsync
  502. */
  503. if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
  504. vpfe_reg_write(ccdc,
  505. (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
  506. VPFE_LATCH_ON_VSYNC_DISABLE |
  507. VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
  508. else
  509. vpfe_reg_write(ccdc,
  510. (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
  511. VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
  512. /*
  513. * configure the horizontal line offset. This should be on a
  514. * 32 byte boundary, so the 5 LSBs are cleared.
  515. */
  516. vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
  517. /* configure the memory line offset */
  518. if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
  519. /* two fields are interleaved in memory */
  520. vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
  521. VPFE_SDOFST);
  522. }
  523. static void
  524. vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
  525. struct vpfe_ccdc_black_clamp *bclamp)
  526. {
  527. u32 val;
  528. if (!bclamp->enable) {
  529. /* configure DCSub */
  530. val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
  531. vpfe_reg_write(ccdc, val, VPFE_DCSUB);
  532. vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
  533. return;
  534. }
  535. /*
  536. * Configure gain, start pixel, number of lines to be averaged,
  537. * number of pixels/line to be averaged, and enable black clamping
  538. */
  539. val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
  540. ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
  541. VPFE_BLK_ST_PXL_SHIFT) |
  542. ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
  543. VPFE_BLK_SAMPLE_LINE_SHIFT) |
  544. ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
  545. VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
  546. vpfe_reg_write(ccdc, val, VPFE_CLAMP);
  547. /* If black clamping is enabled then make dcsub 0 */
  548. vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
  549. }
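/*
 * Black clamping and DC subtraction are mutually exclusive: with clamping
 * disabled, the fixed dc_sub value is written to DCSUB and CLAMP is left at
 * its default; with clamping enabled, the averaging window is programmed
 * into CLAMP and DCSUB is forced to 0.
 */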
  550. static void
  551. vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
  552. struct vpfe_ccdc_black_compensation *bcomp)
  553. {
  554. u32 val;
  555. val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
  556. ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
  557. VPFE_BLK_COMP_GB_COMP_SHIFT) |
  558. ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
  559. VPFE_BLK_COMP_GR_COMP_SHIFT) |
  560. ((bcomp->r & VPFE_BLK_COMP_MASK) <<
  561. VPFE_BLK_COMP_R_COMP_SHIFT));
  562. vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
  563. }
  564. /*
  565. * vpfe_ccdc_config_raw()
  566. * This function will configure CCDC for Raw capture mode
  567. */
  568. static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
  569. {
  570. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  571. struct vpfe_ccdc_config_params_raw *config_params =
  572. &ccdc->ccdc_cfg.bayer.config_params;
  573. struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
  574. unsigned int syn_mode;
  575. unsigned int val;
  576. vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
  577. /* Reset CCDC */
  578. vpfe_ccdc_restore_defaults(ccdc);
  579. /* Disable latching function registers on VSYNC */
  580. vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
  581. /*
  582. * Configure the vertical sync polarity (SYN_MODE.VDPOL),
  583. * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
  584. * (SYN_MODE.FLDPOL), frame format (progressive or interlaced),
  585. * data size (SYNMODE.DATSIZ) and pixel format (input mode), enable
  586. * output to SDRAM, and enable the internal timing generator
  587. */
  588. syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
  589. ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
  590. ((params->fid_pol & VPFE_FID_POL_MASK) <<
  591. VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
  592. VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
  593. ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
  594. VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
  595. VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
  596. VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
  597. /* Enable and configure aLaw register if needed */
  598. if (config_params->alaw.enable) {
  599. val = ((config_params->alaw.gamma_wd &
  600. VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
  601. vpfe_reg_write(ccdc, val, VPFE_ALAW);
  602. vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
  603. }
  604. /* Configure video window */
  605. vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
  606. params->bytesperpixel);
  607. /* Configure Black Clamp */
  608. vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
  609. /* Configure Black level compensation */
  610. vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
  611. /* If data size is 8 bit then pack the data */
  612. if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
  613. config_params->alaw.enable)
  614. syn_mode |= VPFE_DATA_PACK_ENABLE;
  615. /*
  616. * Configure Horizontal offset register. If pack 8 is enabled then
  617. * 1 pixel will take 1 byte
  618. */
  619. vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
  620. vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
  621. params->bytesperline, params->bytesperline);
  622. /* Set value for SDOFST */
  623. if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
  624. if (params->image_invert_enable) {
  625. /* For interlace inverse mode */
  626. vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
  627. VPFE_SDOFST);
  628. } else {
  629. /* For interlace non inverse mode */
  630. vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
  631. VPFE_SDOFST);
  632. }
  633. } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
  634. vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
  635. VPFE_SDOFST);
  636. }
  637. vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
  638. vpfe_reg_dump(ccdc);
  639. }
  640. static inline int
  641. vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
  642. enum ccdc_buftype buf_type)
  643. {
  644. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  645. ccdc->ccdc_cfg.bayer.buf_type = buf_type;
  646. else
  647. ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
  648. return 0;
  649. }
  650. static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
  651. {
  652. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  653. return ccdc->ccdc_cfg.bayer.buf_type;
  654. return ccdc->ccdc_cfg.ycbcr.buf_type;
  655. }
  656. static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
  657. {
  658. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  659. vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
  660. ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
  661. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  662. ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
  663. /*
  664. * Need to clear it in case it was left on
  665. * after the last capture.
  666. */
  667. ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
  668. switch (pixfmt) {
  669. case V4L2_PIX_FMT_SBGGR8:
  670. ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
  671. break;
  672. case V4L2_PIX_FMT_YUYV:
  673. case V4L2_PIX_FMT_UYVY:
  674. case V4L2_PIX_FMT_YUV420:
  675. case V4L2_PIX_FMT_NV12:
  676. case V4L2_PIX_FMT_RGB565X:
  677. break;
  678. case V4L2_PIX_FMT_SBGGR16:
  679. default:
  680. return -EINVAL;
  681. }
  682. } else {
  683. switch (pixfmt) {
  684. case V4L2_PIX_FMT_YUYV:
  685. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
  686. break;
  687. case V4L2_PIX_FMT_UYVY:
  688. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
  689. break;
  690. default:
  691. return -EINVAL;
  692. }
  693. }
  694. return 0;
  695. }
  696. static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
  697. {
  698. u32 pixfmt;
  699. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  700. pixfmt = V4L2_PIX_FMT_YUYV;
  701. } else {
  702. if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
  703. pixfmt = V4L2_PIX_FMT_YUYV;
  704. else
  705. pixfmt = V4L2_PIX_FMT_UYVY;
  706. }
  707. return pixfmt;
  708. }
  709. static int
  710. vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
  711. struct v4l2_rect *win, unsigned int bpp)
  712. {
  713. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
  714. ccdc->ccdc_cfg.bayer.win = *win;
  715. ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
  716. ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
  717. } else {
  718. ccdc->ccdc_cfg.ycbcr.win = *win;
  719. ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
  720. ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
  721. }
  722. return 0;
  723. }
  724. static inline void
  725. vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
  726. struct v4l2_rect *win)
  727. {
  728. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  729. *win = ccdc->ccdc_cfg.bayer.win;
  730. else
  731. *win = ccdc->ccdc_cfg.ycbcr.win;
  732. }
  733. static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
  734. {
  735. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  736. return ccdc->ccdc_cfg.bayer.bytesperline;
  737. return ccdc->ccdc_cfg.ycbcr.bytesperline;
  738. }
  739. static inline int
  740. vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
  741. enum ccdc_frmfmt frm_fmt)
  742. {
  743. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  744. ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
  745. else
  746. ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
  747. return 0;
  748. }
  749. static inline enum ccdc_frmfmt
  750. vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
  751. {
  752. if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
  753. return ccdc->ccdc_cfg.bayer.frm_fmt;
  754. return ccdc->ccdc_cfg.ycbcr.frm_fmt;
  755. }
  756. static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
  757. {
  758. return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
  759. }
  760. static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
  761. {
  762. vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
  763. }
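/*
 * The SDRAM start address is masked with 0xffffffe0 before being written,
 * i.e. capture buffers must be 32-byte aligned - the same alignment the
 * driver applies to bytesperline.
 */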
  764. static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
  765. struct vpfe_hw_if_param *params)
  766. {
  767. struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
  768. ccdc->ccdc_cfg.if_type = params->if_type;
  769. switch (params->if_type) {
  770. case VPFE_BT656:
  771. case VPFE_YCBCR_SYNC_16:
  772. case VPFE_YCBCR_SYNC_8:
  773. case VPFE_BT656_10BIT:
  774. ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
  775. ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
  776. break;
  777. case VPFE_RAW_BAYER:
  778. ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
  779. ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
  780. if (params->bus_width == 10)
  781. ccdc->ccdc_cfg.bayer.config_params.data_sz =
  782. VPFE_CCDC_DATA_10BITS;
  783. else
  784. ccdc->ccdc_cfg.bayer.config_params.data_sz =
  785. VPFE_CCDC_DATA_8BITS;
  786. vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
  787. params->bus_width);
  788. vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
  789. ccdc->ccdc_cfg.bayer.config_params.data_sz);
  790. break;
  791. default:
  792. return -EINVAL;
  793. }
  794. return 0;
  795. }
  796. static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
  797. {
  798. unsigned int vpfe_int_status;
  799. vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
  800. switch (vdint) {
  801. /* VD0 interrupt */
  802. case VPFE_VDINT0:
  803. vpfe_int_status &= ~VPFE_VDINT0;
  804. vpfe_int_status |= VPFE_VDINT0;
  805. break;
  806. /* VD1 interrupt */
  807. case VPFE_VDINT1:
  808. vpfe_int_status &= ~VPFE_VDINT1;
  809. vpfe_int_status |= VPFE_VDINT1;
  810. break;
  811. /* VD2 interrupt */
  812. case VPFE_VDINT2:
  813. vpfe_int_status &= ~VPFE_VDINT2;
  814. vpfe_int_status |= VPFE_VDINT2;
  815. break;
  816. /* Clear all interrupts */
  817. default:
  818. vpfe_int_status &= ~(VPFE_VDINT0 |
  819. VPFE_VDINT1 |
  820. VPFE_VDINT2);
  821. vpfe_int_status |= (VPFE_VDINT0 |
  822. VPFE_VDINT1 |
  823. VPFE_VDINT2);
  824. break;
  825. }
  826. /* Clear specific VDINT from the status register */
  827. vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
  828. vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
  829. /* Acknowledge that we are done with all interrupts */
  830. vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
  831. }
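/*
 * Note on vpfe_clear_intr(): IRQ_STS appears to be a write-one-to-clear
 * register, so the read-modify-write above simply ensures the selected VDINT
 * bits are set in the value written back, and the final write to IRQ_EOI
 * signals end-of-interrupt to the VPFE.
 */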
  832. static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
  833. {
  834. ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
  835. ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
  836. ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
  837. ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
  838. ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
  839. ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
  840. ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
  841. ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
  842. ccdc->ccdc_cfg.ycbcr.win.left = 0;
  843. ccdc->ccdc_cfg.ycbcr.win.top = 0;
  844. ccdc->ccdc_cfg.ycbcr.win.width = 720;
  845. ccdc->ccdc_cfg.ycbcr.win.height = 576;
  846. ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
  847. ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
  848. ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
  849. ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
  850. ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
  851. ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
  852. ccdc->ccdc_cfg.bayer.win.left = 0;
  853. ccdc->ccdc_cfg.bayer.win.top = 0;
  854. ccdc->ccdc_cfg.bayer.win.width = 800;
  855. ccdc->ccdc_cfg.bayer.win.height = 600;
  856. ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
  857. ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
  858. VPFE_CCDC_GAMMA_BITS_09_0;
  859. }
  860. /*
  861. * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
  862. */
  863. static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
  864. struct v4l2_format *f)
  865. {
  866. struct v4l2_rect image_win;
  867. enum ccdc_buftype buf_type;
  868. enum ccdc_frmfmt frm_fmt;
  869. memset(f, 0, sizeof(*f));
  870. f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  871. vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
  872. f->fmt.pix.width = image_win.width;
  873. f->fmt.pix.height = image_win.height;
  874. f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
  875. f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
  876. f->fmt.pix.height;
  877. buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
  878. f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
  879. frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  880. if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
  881. f->fmt.pix.field = V4L2_FIELD_NONE;
  882. } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
  883. if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
  884. f->fmt.pix.field = V4L2_FIELD_INTERLACED;
  885. } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
  886. f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
  887. } else {
  888. vpfe_err(vpfe, "Invalid buf_type\n");
  889. return -EINVAL;
  890. }
  891. } else {
  892. vpfe_err(vpfe, "Invalid frm_fmt\n");
  893. return -EINVAL;
  894. }
  895. return 0;
  896. }
  897. static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
  898. {
  899. enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
  900. int ret;
  901. vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
  902. vpfe_dbg(1, vpfe, "pixelformat: %s\n",
  903. print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
  904. if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
  905. vpfe->fmt.fmt.pix.pixelformat) < 0) {
  906. vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
  907. return -EINVAL;
  908. }
  909. /* configure the image window */
  910. vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
  911. switch (vpfe->fmt.fmt.pix.field) {
  912. case V4L2_FIELD_INTERLACED:
  913. /* do nothing, since it is default */
  914. ret = vpfe_ccdc_set_buftype(
  915. &vpfe->ccdc,
  916. CCDC_BUFTYPE_FLD_INTERLEAVED);
  917. break;
  918. case V4L2_FIELD_NONE:
  919. frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
  920. /* buffer type only applicable for interlaced scan */
  921. break;
  922. case V4L2_FIELD_SEQ_TB:
  923. ret = vpfe_ccdc_set_buftype(
  924. &vpfe->ccdc,
  925. CCDC_BUFTYPE_FLD_SEPARATED);
  926. break;
  927. default:
  928. return -EINVAL;
  929. }
  930. if (ret)
  931. return ret;
  932. return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
  933. }
  934. /*
  935. * vpfe_config_image_format()
  936. * For a given standard, this function sets up the default
  937. * pix format & crop values in the vpfe device and ccdc. It first
  938. * starts with default values based on the standard table.
  939. * It then checks if the sub device supports g_mbus_fmt and overrides the
  940. * values based on that. Sets crop values to match the scan resolution
  941. * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
  942. * values in the ccdc
  943. */
  944. static int vpfe_config_image_format(struct vpfe_device *vpfe,
  945. v4l2_std_id std_id)
  946. {
  947. struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
  948. int i, ret;
  949. for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
  950. if (vpfe_standards[i].std_id & std_id) {
  951. vpfe->std_info.active_pixels =
  952. vpfe_standards[i].width;
  953. vpfe->std_info.active_lines =
  954. vpfe_standards[i].height;
  955. vpfe->std_info.frame_format =
  956. vpfe_standards[i].frame_format;
  957. vpfe->std_index = i;
  958. break;
  959. }
  960. }
  961. if (i == ARRAY_SIZE(vpfe_standards)) {
  962. vpfe_err(vpfe, "standard not supported\n");
  963. return -EINVAL;
  964. }
  965. vpfe->crop.top = vpfe->crop.left = 0;
  966. vpfe->crop.width = vpfe->std_info.active_pixels;
  967. vpfe->crop.height = vpfe->std_info.active_lines;
  968. pix->width = vpfe->crop.width;
  969. pix->height = vpfe->crop.height;
  970. pix->pixelformat = V4L2_PIX_FMT_YUYV;
  971. /* first field and frame format based on standard frame format */
  972. if (vpfe->std_info.frame_format)
  973. pix->field = V4L2_FIELD_INTERLACED;
  974. else
  975. pix->field = V4L2_FIELD_NONE;
  976. ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
  977. if (ret)
  978. return ret;
  979. /* Update the crop window based on found values */
  980. vpfe->crop.width = pix->width;
  981. vpfe->crop.height = pix->height;
  982. return vpfe_config_ccdc_image_format(vpfe);
  983. }
  984. static int vpfe_initialize_device(struct vpfe_device *vpfe)
  985. {
  986. struct vpfe_subdev_info *sdinfo;
  987. int ret;
  988. sdinfo = &vpfe->cfg->sub_devs[0];
  989. sdinfo->sd = vpfe->sd[0];
  990. vpfe->current_input = 0;
  991. vpfe->std_index = 0;
  992. /* Configure the default format information */
  993. ret = vpfe_config_image_format(vpfe,
  994. vpfe_standards[vpfe->std_index].std_id);
  995. if (ret)
  996. return ret;
  997. pm_runtime_get_sync(vpfe->pdev);
  998. vpfe_config_enable(&vpfe->ccdc, 1);
  999. vpfe_ccdc_restore_defaults(&vpfe->ccdc);
  1000. /* Clear all VPFE interrupts */
  1001. vpfe_clear_intr(&vpfe->ccdc, -1);
  1002. return ret;
  1003. }
  1004. /*
  1005. * vpfe_release : This function is based on the vb2_fop_release
  1006. * helper function.
  1007. * It has been augmented to handle module power management,
  1008. * by disabling/enabling h/w module fcntl clock when necessary.
  1009. */
  1010. static int vpfe_release(struct file *file)
  1011. {
  1012. struct vpfe_device *vpfe = video_drvdata(file);
  1013. int ret;
  1014. mutex_lock(&vpfe->lock);
  1015. if (v4l2_fh_is_singular_file(file))
  1016. vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
  1017. ret = _vb2_fop_release(file, NULL);
  1018. mutex_unlock(&vpfe->lock);
  1019. return ret;
  1020. }
  1021. /*
  1022. * vpfe_open : This function is based on the v4l2_fh_open helper function.
  1023. * It has been augmented to handle module power management,
  1024. * by disabling/enabling h/w module fcntl clock when necessary.
  1025. */
  1026. static int vpfe_open(struct file *file)
  1027. {
  1028. struct vpfe_device *vpfe = video_drvdata(file);
  1029. int ret;
  1030. mutex_lock(&vpfe->lock);
  1031. ret = v4l2_fh_open(file);
  1032. if (ret) {
  1033. vpfe_err(vpfe, "v4l2_fh_open failed\n");
  1034. goto unlock;
  1035. }
  1036. if (!v4l2_fh_is_singular_file(file))
  1037. goto unlock;
  1038. if (vpfe_initialize_device(vpfe)) {
  1039. v4l2_fh_release(file);
  1040. ret = -ENODEV;
  1041. }
  1042. unlock:
  1043. mutex_unlock(&vpfe->lock);
  1044. return ret;
  1045. }
  1046. /**
  1047. * vpfe_schedule_next_buffer: set next buffer address for capture
  1048. * @vpfe : ptr to vpfe device
  1049. *
  1050. * This function will get the next buffer from the dma queue and
  1051. * set the buffer address in the vpfe register for capture.
  1052. * The buffer is marked active.
  1053. *
  1054. * Assumes caller is holding vpfe->dma_queue_lock already
  1055. */
  1056. static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
  1057. {
  1058. vpfe->next_frm = list_entry(vpfe->dma_queue.next,
  1059. struct vpfe_cap_buffer, list);
  1060. list_del(&vpfe->next_frm->list);
  1061. vpfe_set_sdr_addr(&vpfe->ccdc,
  1062. vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0));
  1063. }
  1064. static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
  1065. {
  1066. unsigned long addr;
  1067. addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) +
  1068. vpfe->field_off;
  1069. vpfe_set_sdr_addr(&vpfe->ccdc, addr);
  1070. }
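/*
 * vpfe_schedule_bottom_field() re-points the CCDC at the bottom field of the
 * current buffer: field_off holds the byte offset of the bottom field within
 * the capture buffer and is set up before capture starts. It is used for the
 * V4L2_FIELD_SEQ_TB case in the interrupt handler below.
 */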
  1071. /*
  1072. * vpfe_process_buffer_complete: process a completed buffer
  1073. * @vpfe : ptr to vpfe device
  1074. *
  1075. * This function timestamps the buffer and marks it as DONE. It also
  1076. * wakes up any process waiting on the QUEUE and sets the next buffer
  1077. * as current
  1078. */
  1079. static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
  1080. {
  1081. v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp);
  1082. vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field;
  1083. vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++;
  1084. vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE);
  1085. vpfe->cur_frm = vpfe->next_frm;
  1086. }
  1087. /*
  1088. * vpfe_isr : ISR handler for vpfe capture (VINT0)
  1089. * @irq: irq number
  1090. * @dev_id: dev_id ptr
  1091. *
  1092. * It changes status of the captured buffer, takes next buffer from the queue
  1093. * and sets its address in VPFE registers
  1094. */
  1095. static irqreturn_t vpfe_isr(int irq, void *dev)
  1096. {
  1097. struct vpfe_device *vpfe = (struct vpfe_device *)dev;
  1098. enum v4l2_field field;
  1099. int intr_status;
  1100. int fid;
  1101. intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
  1102. if (intr_status & VPFE_VDINT0) {
  1103. field = vpfe->fmt.fmt.pix.field;
  1104. if (field == V4L2_FIELD_NONE) {
  1105. /* handle progressive frame capture */
  1106. if (vpfe->cur_frm != vpfe->next_frm)
  1107. vpfe_process_buffer_complete(vpfe);
  1108. goto next_intr;
  1109. }
  1110. /* interlaced or TB capture: check which field
  1111. the hardware is currently in */
  1112. fid = vpfe_ccdc_getfid(&vpfe->ccdc);
  1113. /* switch the software maintained field id */
  1114. vpfe->field ^= 1;
  1115. if (fid == vpfe->field) {
  1116. /* we are in sync here, continue */
  1117. if (fid == 0) {
  1118. /*
  1119. * One frame is just being captured. If the
  1120. * next frame is available, release the
  1121. * current frame and move on
  1122. */
  1123. if (vpfe->cur_frm != vpfe->next_frm)
  1124. vpfe_process_buffer_complete(vpfe);
  1125. /*
  1126. * based on whether the two fields are stored
  1127. * interleave or separately in memory,
  1128. * reconfigure the CCDC memory address
  1129. */
  1130. if (field == V4L2_FIELD_SEQ_TB)
  1131. vpfe_schedule_bottom_field(vpfe);
  1132. goto next_intr;
  1133. }
  1134. /*
  1135. * if one field has just been captured, configure
  1136. * the next frame: get the next frame from the dma
  1137. * queue; if no frame is available, hold on to the
  1138. * current buffer
  1139. */
  1140. spin_lock(&vpfe->dma_queue_lock);
  1141. if (!list_empty(&vpfe->dma_queue) &&
  1142. vpfe->cur_frm == vpfe->next_frm)
  1143. vpfe_schedule_next_buffer(vpfe);
  1144. spin_unlock(&vpfe->dma_queue_lock);
  1145. } else if (fid == 0) {
  1146. /*
  1147. * out of sync. Recover from any hardware out-of-sync.
  1148. * May lose one frame
  1149. */
  1150. vpfe->field = fid;
  1151. }
  1152. }
  1153. next_intr:
  1154. if (intr_status & VPFE_VDINT1) {
  1155. spin_lock(&vpfe->dma_queue_lock);
  1156. if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
  1157. !list_empty(&vpfe->dma_queue) &&
  1158. vpfe->cur_frm == vpfe->next_frm)
  1159. vpfe_schedule_next_buffer(vpfe);
  1160. spin_unlock(&vpfe->dma_queue_lock);
  1161. }
  1162. vpfe_clear_intr(&vpfe->ccdc, intr_status);
  1163. return IRQ_HANDLED;
  1164. }
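/*
 * Interrupt usage: VDINT0, programmed near the first line of the frame/field
 * in vpfe_ccdc_setwin(), is used above to complete the current buffer (and,
 * for SEQ_TB capture, to switch to the bottom field), while VDINT1 -
 * programmed at half the frame height - is used in progressive mode to queue
 * the next buffer while the current frame is still being written out.
 */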
  1165. static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
  1166. {
  1167. unsigned int intr = VPFE_VDINT0;
  1168. enum ccdc_frmfmt frame_format;
  1169. frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  1170. if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
  1171. intr |= VPFE_VDINT1;
  1172. vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
  1173. }
  1174. static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
  1175. {
  1176. unsigned int intr = VPFE_VDINT0;
  1177. enum ccdc_frmfmt frame_format;
  1178. frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
  1179. if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
  1180. intr |= VPFE_VDINT1;
  1181. vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
  1182. }
  1183. static int vpfe_querycap(struct file *file, void *priv,
  1184. struct v4l2_capability *cap)
  1185. {
  1186. struct vpfe_device *vpfe = video_drvdata(file);
  1187. vpfe_dbg(2, vpfe, "vpfe_querycap\n");
  1188. strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
  1189. strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
  1190. snprintf(cap->bus_info, sizeof(cap->bus_info),
  1191. "platform:%s", vpfe->v4l2_dev.name);
  1192. cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
  1193. V4L2_CAP_READWRITE;
  1194. cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
  1195. return 0;
  1196. }
  1197. /* get the format set at output pad of the adjacent subdev */
  1198. static int __vpfe_get_format(struct vpfe_device *vpfe,
  1199. struct v4l2_format *format, unsigned int *bpp)
  1200. {
  1201. struct v4l2_mbus_framefmt mbus_fmt;
  1202. struct vpfe_subdev_info *sdinfo;
  1203. struct v4l2_subdev_format fmt;
  1204. int ret;
  1205. sdinfo = vpfe->current_subdev;
  1206. if (!sdinfo->sd)
  1207. return -EINVAL;
  1208. fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  1209. fmt.pad = 0;
  1210. ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
  1211. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1212. return ret;
  1213. if (!ret) {
  1214. v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
  1215. mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
  1216. } else {
  1217. ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
  1218. sdinfo->grp_id,
  1219. video, g_mbus_fmt,
  1220. &mbus_fmt);
  1221. if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
  1222. return ret;
  1223. v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
  1224. mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
  1225. }
  1226. format->type = vpfe->fmt.type;
  1227. vpfe_dbg(1, vpfe,
  1228. "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
  1229. __func__, format->fmt.pix.width, format->fmt.pix.height,
  1230. print_fourcc(format->fmt.pix.pixelformat),
  1231. format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
  1232. return 0;
  1233. }
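/*
 * Both __vpfe_get_format() and __vpfe_set_format() prefer the pad-level
 * get_fmt/set_fmt subdev operations and only fall back to the legacy video
 * g_mbus_fmt/s_mbus_fmt calls when the subdev does not implement them, so
 * the resulting pix format (and bpp) always comes from mbus_to_pix().
 */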

/* set the format at output pad of the adjacent subdev */
static int __vpfe_set_format(struct vpfe_device *vpfe,
			     struct v4l2_format *format, unsigned int *bpp)
{
	struct v4l2_mbus_framefmt mbus_fmt;
	struct vpfe_subdev_info *sdinfo;
	struct v4l2_subdev_format fmt;
	int ret;

	vpfe_dbg(2, vpfe, "__vpfe_set_format\n");

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt.pad = 0;

	pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);

	ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	if (!ret) {
		v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
		mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
	} else {
		ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
						 sdinfo->grp_id,
						 video, s_mbus_fmt,
						 &mbus_fmt);
		if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
			return ret;
		v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
		mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
	}

	format->type = vpfe->fmt.type;

	vpfe_dbg(1, vpfe,
		 "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
		 __func__, format->fmt.pix.width, format->fmt.pix.height,
		 print_fourcc(format->fmt.pix.pixelformat),
		 format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);

	return 0;
}
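
/* Return the capture format currently stored in the driver */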
static int vpfe_g_fmt(struct file *file, void *priv,
		      struct v4l2_format *fmt)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");

	*fmt = vpfe->fmt;

	return 0;
}
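
/* Enumerate the pixel formats that were marked supported at async bind time */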
static int vpfe_enum_fmt(struct file *file, void *priv,
			 struct v4l2_fmtdesc *f)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct vpfe_subdev_info *sdinfo;
	struct vpfe_fmt *fmt = NULL;
	unsigned int k;

	vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
		 f->index);

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	for (k = 0; k < ARRAY_SIZE(formats); k++) {
		if (formats[k].index == f->index) {
			fmt = &formats[k];
			break;
		}
	}
	if (!fmt)
		return -EINVAL;

	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
	f->pixelformat = fmt->fourcc;
	f->type = vpfe->fmt.type;

	vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
		 f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);

	return 0;
}
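
/* TRY_FMT: report the format currently active at the sub-device pad */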
static int vpfe_try_fmt(struct file *file, void *priv,
			struct v4l2_format *fmt)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	unsigned int bpp;

	vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");

	return __vpfe_get_format(vpfe, fmt, &bpp);
}
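
/* Apply a new capture format and reprogram the CCDC accordingly */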
static int vpfe_s_fmt(struct file *file, void *priv,
		      struct v4l2_format *fmt)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct v4l2_format format;
	unsigned int bpp;
	int ret;

	vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");

	/* If streaming is started, return error */
	if (vb2_is_busy(&vpfe->buffer_queue)) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		return -EBUSY;
	}

	/* Fetch the format currently set on the sub-device for comparison */
	ret = __vpfe_get_format(vpfe, &format, &bpp);
	if (ret)
		return ret;

	if (!cmp_v4l2_format(fmt, &format)) {
		/* Sensor format is different from the requested format
		 * so we need to change it
		 */
		ret = __vpfe_set_format(vpfe, fmt, &bpp);
		if (ret)
			return ret;
	} else /* Just make sure all of the fields are consistent */
		*fmt = format;

	/* First detach any IRQ if currently attached */
	vpfe_detach_irq(vpfe);
	vpfe->fmt = *fmt;
	vpfe->bpp = bpp;

	/* Update the crop window based on found values */
	vpfe->crop.width = fmt->fmt.pix.width;
	vpfe->crop.height = fmt->fmt.pix.height;

	/* set image capture parameters in the ccdc */
	return vpfe_config_ccdc_image_format(vpfe);
}
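
/* Enumerate the discrete frame sizes the sub-device supports for a format */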
static int vpfe_enum_size(struct file *file, void *priv,
			  struct v4l2_frmsizeenum *fsize)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct v4l2_subdev_frame_size_enum fse;
	struct vpfe_subdev_info *sdinfo;
	struct v4l2_mbus_framefmt mbus;
	struct v4l2_pix_format pix;
	struct vpfe_fmt *fmt;
	int ret;

	vpfe_dbg(2, vpfe, "vpfe_enum_size\n");

	/* check for valid format */
	fmt = find_format_by_pix(fsize->pixel_format);
	if (!fmt) {
		vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
			 fsize->pixel_format);
		return -EINVAL;
	}

	memset(fsize->reserved, 0x0, sizeof(fsize->reserved));

	sdinfo = vpfe->current_subdev;
	if (!sdinfo->sd)
		return -EINVAL;

	memset(&pix, 0x0, sizeof(pix));
	/* Construct pix from parameter and use default for the rest */
	pix.pixelformat = fsize->pixel_format;
	pix.width = 640;
	pix.height = 480;
	pix.colorspace = V4L2_COLORSPACE_SRGB;
	pix.field = V4L2_FIELD_NONE;
	pix_to_mbus(vpfe, &pix, &mbus);

	memset(&fse, 0x0, sizeof(fse));
	fse.index = fsize->index;
	fse.pad = 0;
	fse.code = mbus.code;
	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
	if (ret)
		return -EINVAL;

	vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
		 fse.index, fse.code, fse.min_width, fse.max_width,
		 fse.min_height, fse.max_height);

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = fse.max_width;
	fsize->discrete.height = fse.max_height;

	vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
		 fsize->index, print_fourcc(fsize->pixel_format),
		 fsize->discrete.width, fsize->discrete.height);

	return 0;
}

/*
 * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
 * given app input index
 */
static int
vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
			    int *subdev_index,
			    int *subdev_input_index,
			    int app_input_index)
{
	struct vpfe_config *cfg = vpfe->cfg;
	struct vpfe_subdev_info *sdinfo;
	int i, j = 0;

	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
		sdinfo = &cfg->sub_devs[i];
		if (app_input_index < (j + 1)) {
			*subdev_index = i;
			*subdev_input_index = app_input_index - j;
			return 0;
		}
		j++;
	}

	return -EINVAL;
}

/*
 * vpfe_get_app_input_index - Get app input index for a given subdev input index.
 * The driver stores the input index of the current sub-device and translates it
 * when the application requests the current input.
 */
static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
				    int *app_input_index)
{
	struct vpfe_config *cfg = vpfe->cfg;
	struct vpfe_subdev_info *sdinfo;
	struct i2c_client *client;
	struct i2c_client *curr_client;
	int i, j = 0;

	curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd);
	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
		sdinfo = &cfg->sub_devs[i];
		client = v4l2_get_subdevdata(sdinfo->sd);
		if (client->addr == curr_client->addr &&
		    client->adapter->nr == curr_client->adapter->nr) {
			if (vpfe->current_input >= 1)
				return -1;
			*app_input_index = j + vpfe->current_input;
			return 0;
		}
		j++;
	}

	return -EINVAL;
}
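
/* Enumerate the inputs exposed by the registered sub-devices */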
static int vpfe_enum_input(struct file *file, void *priv,
			   struct v4l2_input *inp)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct vpfe_subdev_info *sdinfo;
	int subdev, index;

	vpfe_dbg(2, vpfe, "vpfe_enum_input\n");

	if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
					inp->index) < 0) {
		vpfe_dbg(1, vpfe,
			 "input information not found for the subdev\n");
		return -EINVAL;
	}
	sdinfo = &vpfe->cfg->sub_devs[subdev];
	*inp = sdinfo->inputs[index];

	return 0;
}

static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_g_input\n");

	return vpfe_get_app_input_index(vpfe, index);
}

/* Assumes caller is holding vpfe_dev->lock */
static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
{
	int subdev_index = 0, inp_index = 0;
	struct vpfe_subdev_info *sdinfo;
	struct vpfe_route *route;
	u32 input, output;
	int ret;

	vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);

	/* If streaming is started, return error */
	if (vb2_is_busy(&vpfe->buffer_queue)) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		return -EBUSY;
	}
	ret = vpfe_get_subdev_input_index(vpfe,
					  &subdev_index,
					  &inp_index,
					  index);
	if (ret < 0) {
		vpfe_err(vpfe, "invalid input index: %d\n", index);
		goto get_out;
	}

	sdinfo = &vpfe->cfg->sub_devs[subdev_index];
	sdinfo->sd = vpfe->sd[subdev_index];
	route = &sdinfo->routes[inp_index];
	if (route && sdinfo->can_route) {
		input = route->input;
		output = route->output;
		if (sdinfo->sd) {
			ret = v4l2_subdev_call(sdinfo->sd, video,
					       s_routing, input, output, 0);
			if (ret) {
				vpfe_err(vpfe, "s_routing failed\n");
				ret = -EINVAL;
				goto get_out;
			}
		}
	}

	vpfe->current_subdev = sdinfo;
	if (sdinfo->sd)
		vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
	vpfe->current_input = index;
	vpfe->std_index = 0;

	/* set the bus/interface parameter for the sub device in ccdc */
	ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
	if (ret)
		return ret;

	/* set the default image parameters in the device */
	return vpfe_config_image_format(vpfe,
					vpfe_standards[vpfe->std_index].std_id);

get_out:
	return ret;
}

static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe,
		 "vpfe_s_input: index: %d\n", index);

	return vpfe_set_input(vpfe, index);
}

static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct vpfe_subdev_info *sdinfo;

	vpfe_dbg(2, vpfe, "vpfe_querystd\n");

	sdinfo = vpfe->current_subdev;
	if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
		return -ENODATA;

	/* Call querystd function of decoder device */
	return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
					  video, querystd, std_id);
}

static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct vpfe_subdev_info *sdinfo;
	int ret;

	vpfe_dbg(2, vpfe, "vpfe_s_std\n");

	sdinfo = vpfe->current_subdev;
	if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
		return -ENODATA;

	/* If streaming is started, return error */
	if (vb2_is_busy(&vpfe->buffer_queue)) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		ret = -EBUSY;
		return ret;
	}

	ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
					 video, s_std, std_id);
	if (ret < 0) {
		vpfe_err(vpfe, "Failed to set standard\n");
		return ret;
	}
	ret = vpfe_config_image_format(vpfe, std_id);

	return ret;
}

static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct vpfe_subdev_info *sdinfo;

	vpfe_dbg(2, vpfe, "vpfe_g_std\n");

	sdinfo = vpfe->current_subdev;
	if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	*std_id = vpfe_standards[vpfe->std_index].std_id;

	return 0;
}

/*
 * vpfe_calculate_offsets : This function calculates buffers offset
 * for top and bottom field
 */
static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
{
	struct v4l2_rect image_win;

	vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");

	vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
	vpfe->field_off = image_win.height * image_win.width;
}

/*
 * vpfe_queue_setup - Callback function for buffer setup.
 * @vq: vb2_queue ptr
 * @fmt: v4l2 format
 * @nbuffers: ptr to number of buffers requested by application
 * @nplanes: contains number of distinct video planes needed to hold a frame
 * @sizes[]: contains the size (in bytes) of each plane.
 * @alloc_ctxs: ptr to allocation context
 *
 * This callback function is called when reqbuf() is called to adjust
 * the buffer count and buffer size
 */
static int vpfe_queue_setup(struct vb2_queue *vq,
			    const struct v4l2_format *fmt,
			    unsigned int *nbuffers, unsigned int *nplanes,
			    unsigned int sizes[], void *alloc_ctxs[])
{
	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);

	if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage)
		return -EINVAL;

	if (vq->num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->num_buffers;

	*nplanes = 1;
	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : vpfe->fmt.fmt.pix.sizeimage;
	alloc_ctxs[0] = vpfe->alloc_ctx;

	vpfe_dbg(1, vpfe,
		 "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);

	/* Calculate field offset */
	vpfe_calculate_offsets(vpfe);

	return 0;
}

/*
 * vpfe_buffer_prepare : callback function for buffer prepare
 * @vb: ptr to vb2_buffer
 *
 * This is the callback function for buffer prepare when vb2_qbuf()
 * function is called. The buffer is prepared and user space virtual address
 * or user address is converted into physical address
 */
static int vpfe_buffer_prepare(struct vb2_buffer *vb)
{
	struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);

	vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);

	if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
		return -EINVAL;

	vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field;

	return 0;
}

/*
 * vpfe_buffer_queue : Callback function to add buffer to DMA queue
 * @vb: ptr to vb2_buffer
 */
static void vpfe_buffer_queue(struct vb2_buffer *vb)
{
	struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
	struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb);
	unsigned long flags = 0;

	/* add the buffer to the DMA queue */
	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
	list_add_tail(&buf->list, &vpfe->dma_queue);
	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}

/*
 * vpfe_start_streaming : Starts the DMA engine for streaming
 * @vq: ptr to vb2_queue
 * @count: number of buffers
 */
static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
	struct vpfe_cap_buffer *buf, *tmp;
	struct vpfe_subdev_info *sdinfo;
	unsigned long flags;
	unsigned long addr;
	int ret;

	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);

	vpfe->field = 0;
	vpfe->sequence = 0;

	sdinfo = vpfe->current_subdev;

	vpfe_attach_irq(vpfe);

	if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
		vpfe_ccdc_config_raw(&vpfe->ccdc);
	else
		vpfe_ccdc_config_ycbcr(&vpfe->ccdc);

	/* Get the next frame from the buffer queue */
	vpfe->next_frm = list_entry(vpfe->dma_queue.next,
				    struct vpfe_cap_buffer, list);
	vpfe->cur_frm = vpfe->next_frm;
	/* Remove buffer from the buffer queue */
	list_del(&vpfe->cur_frm->list);
	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);

	addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0);
	vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));

	vpfe_pcr_enable(&vpfe->ccdc, 1);

	ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
	if (ret < 0) {
		vpfe_err(vpfe, "Error in attaching interrupt handle\n");
		goto err;
	}

	return 0;

err:
	list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
	}

	return ret;
}

/*
 * vpfe_stop_streaming : Stop the DMA engine
 * @vq: ptr to vb2_queue
 *
 * This callback stops the DMA engine and any remaining buffers
 * in the DMA queue are released.
 */
static void vpfe_stop_streaming(struct vb2_queue *vq)
{
	struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
	struct vpfe_subdev_info *sdinfo;
	unsigned long flags;
	int ret;

	vpfe_pcr_enable(&vpfe->ccdc, 0);

	vpfe_detach_irq(vpfe);

	sdinfo = vpfe->current_subdev;
	ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
		vpfe_dbg(1, vpfe, "stream off failed in subdev\n");

	/* release all active buffers */
	spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
	if (vpfe->cur_frm == vpfe->next_frm) {
		vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR);
	} else {
		if (vpfe->cur_frm != NULL)
			vb2_buffer_done(&vpfe->cur_frm->vb,
					VB2_BUF_STATE_ERROR);
		if (vpfe->next_frm != NULL)
			vb2_buffer_done(&vpfe->next_frm->vb,
					VB2_BUF_STATE_ERROR);
	}

	while (!list_empty(&vpfe->dma_queue)) {
		vpfe->next_frm = list_entry(vpfe->dma_queue.next,
					    struct vpfe_cap_buffer, list);
		list_del(&vpfe->next_frm->list);
		vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
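
/* Report crop bounds, default rectangle and pixel aspect for the active standard */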
static int vpfe_cropcap(struct file *file, void *priv,
			struct v4l2_cropcap *crop)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	vpfe_dbg(2, vpfe, "vpfe_cropcap\n");

	if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
		return -EINVAL;

	memset(crop, 0, sizeof(struct v4l2_cropcap));

	crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	crop->defrect.width = vpfe_standards[vpfe->std_index].width;
	crop->bounds.width = crop->defrect.width;
	crop->defrect.height = vpfe_standards[vpfe->std_index].height;
	crop->bounds.height = crop->defrect.height;
	crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;

	return 0;
}

static int
vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vpfe_device *vpfe = video_drvdata(file);

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = s->r.top = 0;
		s->r.width = vpfe->crop.width;
		s->r.height = vpfe->crop.height;
		break;

	case V4L2_SEL_TGT_CROP:
		s->r = vpfe->crop;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
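
/* Return 1 if rectangle a is entirely contained within rectangle b */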
static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}

static int
vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	struct v4l2_rect cr = vpfe->crop;
	struct v4l2_rect r = s->r;

	/* If streaming is started, return error */
	if (vb2_is_busy(&vpfe->buffer_queue)) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		return -EBUSY;
	}

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    s->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	v4l_bound_align_image(&r.width, 0, cr.width, 0,
			      &r.height, 0, cr.height, 0, 0);

	r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
	r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);

	if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
		return -ERANGE;

	s->r = vpfe->crop = r;

	vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
	vpfe->fmt.fmt.pix.width = r.width;
	vpfe->fmt.fmt.pix.height = r.height;
	vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
	vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
				      vpfe->fmt.fmt.pix.height;

	vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
		 r.left, r.top, r.width, r.height, cr.width, cr.height);

	return 0;
}
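
/* Handle the driver-private VIDIOC_AM437X_CCDC_CFG ioctl */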
static long vpfe_ioctl_default(struct file *file, void *priv,
			       bool valid_prio, unsigned int cmd, void *param)
{
	struct vpfe_device *vpfe = video_drvdata(file);
	int ret;

	vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");

	if (!valid_prio) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		return -EBUSY;
	}

	/* If streaming is started, return error */
	if (vb2_is_busy(&vpfe->buffer_queue)) {
		vpfe_err(vpfe, "%s device busy\n", __func__);
		return -EBUSY;
	}

	switch (cmd) {
	case VIDIOC_AM437X_CCDC_CFG:
		ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
		if (ret) {
			vpfe_dbg(2, vpfe,
				 "Error setting parameters in CCDC\n");
			return ret;
		}
		ret = vpfe_get_ccdc_image_format(vpfe,
						 &vpfe->fmt);
		if (ret < 0) {
			vpfe_dbg(2, vpfe,
				 "Invalid image format at CCDC\n");
			return ret;
		}
		break;

	default:
		ret = -ENOTTY;
		break;
	}

	return ret;
}

static const struct vb2_ops vpfe_video_qops = {
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.queue_setup = vpfe_queue_setup,
	.buf_prepare = vpfe_buffer_prepare,
	.buf_queue = vpfe_buffer_queue,
	.start_streaming = vpfe_start_streaming,
	.stop_streaming = vpfe_stop_streaming,
};

/* vpfe capture driver file operations */
static const struct v4l2_file_operations vpfe_fops = {
	.owner = THIS_MODULE,
	.open = vpfe_open,
	.release = vpfe_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};

/* vpfe capture ioctl operations */
static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
	.vidioc_querycap = vpfe_querycap,
	.vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
	.vidioc_g_fmt_vid_cap = vpfe_g_fmt,
	.vidioc_s_fmt_vid_cap = vpfe_s_fmt,
	.vidioc_try_fmt_vid_cap = vpfe_try_fmt,
	.vidioc_enum_framesizes = vpfe_enum_size,
	.vidioc_enum_input = vpfe_enum_input,
	.vidioc_g_input = vpfe_g_input,
	.vidioc_s_input = vpfe_s_input,
	.vidioc_querystd = vpfe_querystd,
	.vidioc_s_std = vpfe_s_std,
	.vidioc_g_std = vpfe_g_std,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_log_status = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_cropcap = vpfe_cropcap,
	.vidioc_g_selection = vpfe_g_selection,
	.vidioc_s_selection = vpfe_s_selection,
	.vidioc_default = vpfe_ioctl_default,
};
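
/*
 * vpfe_async_bound : called when a matching sub-device is bound by the
 * async framework; records the sub-device and marks the mbus codes it
 * supports in the formats table.
 */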
static int
vpfe_async_bound(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *subdev,
		 struct v4l2_async_subdev *asd)
{
	struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
						struct vpfe_device, v4l2_dev);
	struct v4l2_subdev_mbus_code_enum mbus_code;
	struct vpfe_subdev_info *sdinfo;
	bool found = false;
	int i, j;

	vpfe_dbg(1, vpfe, "vpfe_async_bound\n");

	for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
		if (vpfe->cfg->asd[i]->match.of.node == asd->match.of.node) {
			sdinfo = &vpfe->cfg->sub_devs[i];
			vpfe->sd[i] = subdev;
			vpfe->sd[i]->grp_id = sdinfo->grp_id;
			found = true;
			break;
		}
	}

	if (!found) {
		vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
		return -EINVAL;
	}

	vpfe->video_dev.tvnorms |= sdinfo->inputs[0].std;

	/* setup the supported formats & indexes */
	for (j = 0, i = 0; ; ++j) {
		struct vpfe_fmt *fmt;
		int ret;

		memset(&mbus_code, 0, sizeof(mbus_code));
		mbus_code.index = j;
		mbus_code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
				       NULL, &mbus_code);
		if (ret)
			break;

		fmt = find_format_by_code(mbus_code.code);
		if (!fmt)
			continue;

		fmt->supported = true;
		fmt->index = i++;
	}

	return 0;
}
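
/*
 * vpfe_probe_complete : finish initialization once all sub-devices are
 * bound; selects the first sub-device, sets up the vb2 queue and
 * registers the video device.
 */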
static int vpfe_probe_complete(struct vpfe_device *vpfe)
{
	struct video_device *vdev;
	struct vb2_queue *q;
	int err;

	spin_lock_init(&vpfe->dma_queue_lock);
	mutex_init(&vpfe->lock);

	vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* set first sub device as current one */
	vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
	vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;

	err = vpfe_set_input(vpfe, 0);
	if (err)
		goto probe_out;

	/* Initialize videobuf2 queue as per the buffer type */
	vpfe->alloc_ctx = vb2_dma_contig_init_ctx(vpfe->pdev);
	if (IS_ERR(vpfe->alloc_ctx)) {
		vpfe_err(vpfe, "Failed to get the context\n");
		err = PTR_ERR(vpfe->alloc_ctx);
		goto probe_out;
	}

	q = &vpfe->buffer_queue;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
	q->drv_priv = vpfe;
	q->ops = &vpfe_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &vpfe->lock;
	q->min_buffers_needed = 1;

	err = vb2_queue_init(q);
	if (err) {
		vpfe_err(vpfe, "vb2_queue_init() failed\n");
		vb2_dma_contig_cleanup_ctx(vpfe->alloc_ctx);
		goto probe_out;
	}

	INIT_LIST_HEAD(&vpfe->dma_queue);

	vdev = &vpfe->video_dev;
	strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
	vdev->release = video_device_release_empty;
	vdev->fops = &vpfe_fops;
	vdev->ioctl_ops = &vpfe_ioctl_ops;
	vdev->v4l2_dev = &vpfe->v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;
	vdev->queue = q;
	vdev->lock = &vpfe->lock;
	video_set_drvdata(vdev, vpfe);
	err = video_register_device(&vpfe->video_dev, VFL_TYPE_GRABBER, -1);
	if (err) {
		vpfe_err(vpfe,
			 "Unable to register video device.\n");
		goto probe_out;
	}

	return 0;

probe_out:
	v4l2_device_unregister(&vpfe->v4l2_dev);
	return err;
}

static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
{
	struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
						struct vpfe_device, v4l2_dev);

	return vpfe_probe_complete(vpfe);
}
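
/* Build vpfe_config from the device tree graph (one "Camera" input per endpoint) */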
static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
	struct device_node *endpoint = NULL;
	struct v4l2_of_endpoint bus_cfg;
	struct vpfe_subdev_info *sdinfo;
	struct vpfe_config *pdata;
	unsigned int flags;
	unsigned int i;
	int err;

	dev_dbg(&pdev->dev, "vpfe_get_pdata\n");

	if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
		return pdev->dev.platform_data;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	for (i = 0; ; i++) {
		struct device_node *rem;

		endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
						      endpoint);
		if (!endpoint)
			break;

		sdinfo = &pdata->sub_devs[i];
		sdinfo->grp_id = 0;

		/* we only support camera */
		sdinfo->inputs[0].index = i;
		strcpy(sdinfo->inputs[0].name, "Camera");
		sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
		sdinfo->inputs[0].std = V4L2_STD_ALL;
		sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;

		sdinfo->can_route = 0;
		sdinfo->routes = NULL;

		of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
				     &sdinfo->vpfe_param.if_type);
		if (sdinfo->vpfe_param.if_type < 0 ||
		    sdinfo->vpfe_param.if_type > 4) {
			sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
		}

		err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
		if (err) {
			dev_err(&pdev->dev, "Could not parse the endpoint\n");
			goto done;
		}

		sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;

		if (sdinfo->vpfe_param.bus_width < 8 ||
		    sdinfo->vpfe_param.bus_width > 16) {
			dev_err(&pdev->dev, "Invalid bus width.\n");
			goto done;
		}

		flags = bus_cfg.bus.parallel.flags;

		if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
			sdinfo->vpfe_param.hdpol = 1;

		if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
			sdinfo->vpfe_param.vdpol = 1;

		rem = of_graph_get_remote_port_parent(endpoint);
		if (!rem) {
			dev_err(&pdev->dev, "Remote device at %s not found\n",
				endpoint->full_name);
			goto done;
		}

		pdata->asd[i] = devm_kzalloc(&pdev->dev,
					     sizeof(struct v4l2_async_subdev),
					     GFP_KERNEL);
		if (!pdata->asd[i]) {
			of_node_put(rem);
			pdata = NULL;
			goto done;
		}

		pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
		pdata->asd[i]->match.of.node = rem;
		of_node_put(rem);
	}

	of_node_put(endpoint);
	return pdata;

done:
	of_node_put(endpoint);
	return NULL;
}

/*
 * vpfe_probe : This function creates device entries by registering
 * itself to the V4L2 driver and initializes fields of each
 * device object
 */
static int vpfe_probe(struct platform_device *pdev)
{
	struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
	struct vpfe_device *vpfe;
	struct vpfe_ccdc *ccdc;
	struct resource *res;
	int ret;

	if (!vpfe_cfg) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
	if (!vpfe)
		return -ENOMEM;

	vpfe->pdev = &pdev->dev;
	vpfe->cfg = vpfe_cfg;
	ccdc = &vpfe->ccdc;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ccdc->ccdc_cfg.base_addr))
		return PTR_ERR(ccdc->ccdc_cfg.base_addr);

	vpfe->irq = platform_get_irq(pdev, 0);
	if (vpfe->irq <= 0) {
		dev_err(&pdev->dev, "No IRQ resource\n");
		return -ENODEV;
	}

	ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
			       "vpfe_capture0", vpfe);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request interrupt\n");
		return -EINVAL;
	}

	ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
	if (ret) {
		vpfe_err(vpfe,
			 "Unable to register v4l2 device.\n");
		return ret;
	}

	/* set the driver data in platform device */
	platform_set_drvdata(pdev, vpfe);
	/* Enabling module functional clock */
	pm_runtime_enable(&pdev->dev);

	/* for now just enable it here instead of waiting for the open */
	pm_runtime_get_sync(&pdev->dev);

	vpfe_ccdc_config_defaults(ccdc);

	pm_runtime_put_sync(&pdev->dev);

	vpfe->sd = devm_kzalloc(&pdev->dev, sizeof(struct v4l2_subdev *) *
				ARRAY_SIZE(vpfe->cfg->asd), GFP_KERNEL);
	if (!vpfe->sd) {
		ret = -ENOMEM;
		goto probe_out_v4l2_unregister;
	}

	vpfe->notifier.subdevs = vpfe->cfg->asd;
	vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
	vpfe->notifier.bound = vpfe_async_bound;
	vpfe->notifier.complete = vpfe_async_complete;
	ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
					   &vpfe->notifier);
	if (ret) {
		vpfe_err(vpfe, "Error registering async notifier\n");
		ret = -EINVAL;
		goto probe_out_v4l2_unregister;
	}

	return 0;

probe_out_v4l2_unregister:
	v4l2_device_unregister(&vpfe->v4l2_dev);
	return ret;
}

/*
 * vpfe_remove : un-registers the device from the V4L2 framework
 */
static int vpfe_remove(struct platform_device *pdev)
{
	struct vpfe_device *vpfe = platform_get_drvdata(pdev);

	vpfe_dbg(2, vpfe, "vpfe_remove\n");

	pm_runtime_disable(&pdev->dev);

	v4l2_async_notifier_unregister(&vpfe->notifier);
	v4l2_device_unregister(&vpfe->v4l2_dev);
	video_unregister_device(&vpfe->video_dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
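
/* Save the CCDC register context before the module clocks are cut */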
static void vpfe_save_context(struct vpfe_ccdc *ccdc)
{
	ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
	ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
	ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
	ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
	ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
	ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
	ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
	ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
	ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
	ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
	ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
	ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
	ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
	ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
							    VPFE_HD_VD_WID);
	ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
							    VPFE_PIX_LINES);
	ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
							    VPFE_HORZ_INFO);
	ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
							     VPFE_VERT_START);
	ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
							     VPFE_VERT_LINES);
	ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
							    VPFE_HSIZE_OFF);
}

static int vpfe_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct vpfe_device *vpfe = platform_get_drvdata(pdev);
	struct vpfe_ccdc *ccdc = &vpfe->ccdc;

	/* if streaming has not started we don't care */
	if (!vb2_start_streaming_called(&vpfe->buffer_queue))
		return 0;

	pm_runtime_get_sync(dev);
	vpfe_config_enable(ccdc, 1);

	/* Save VPFE context */
	vpfe_save_context(ccdc);

	/* Disable CCDC */
	vpfe_pcr_enable(ccdc, 0);
	vpfe_config_enable(ccdc, 0);

	/* Disable both master and slave clock */
	pm_runtime_put_sync(dev);

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
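
/* Restore the CCDC register context saved by vpfe_save_context() */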
static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
{
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
		       VPFE_HD_VD_WID);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
		       VPFE_PIX_LINES);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
		       VPFE_HORZ_INFO);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
		       VPFE_VERT_START);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
		       VPFE_VERT_LINES);
	vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
		       VPFE_HSIZE_OFF);
}

static int vpfe_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct vpfe_device *vpfe = platform_get_drvdata(pdev);
	struct vpfe_ccdc *ccdc = &vpfe->ccdc;

	/* if streaming has not started we don't care */
	if (!vb2_start_streaming_called(&vpfe->buffer_queue))
		return 0;

	/* Enable both master and slave clock */
	pm_runtime_get_sync(dev);
	vpfe_config_enable(ccdc, 1);

	/* Restore VPFE context */
	vpfe_restore_context(ccdc);

	vpfe_config_enable(ccdc, 0);
	pm_runtime_put_sync(dev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	return 0;
}

#endif

static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);

static const struct of_device_id vpfe_of_match[] = {
	{ .compatible = "ti,am437x-vpfe", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, vpfe_of_match);

static struct platform_driver vpfe_driver = {
	.probe = vpfe_probe,
	.remove = vpfe_remove,
	.driver = {
		.name = VPFE_MODULE_NAME,
		.pm = &vpfe_pm_ops,
		.of_match_table = of_match_ptr(vpfe_of_match),
	},
};

module_platform_driver(vpfe_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TI AM437x VPFE driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VPFE_VERSION);