ipu-image-convert.c

/*
 * Copyright (C) 2012-2016 Mentor Graphics Inc.
 *
 * Queued image conversion support, with tiling and rotation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <video/imx-ipu-image-convert.h>
#include "ipu-prv.h"

/*
 * The IC Resizer has a restriction that the output frame from the
 * resizer must be 1024 or less in both width (pixels) and height
 * (lines).
 *
 * The image converter attempts to split up a conversion when
 * the desired output (converted) frame resolution exceeds the
 * IC resizer limit of 1024 in either dimension.
 *
 * If either dimension of the output frame exceeds the limit, the
 * dimension is split into 1, 2, or 4 equal stripes, for a maximum
 * of 4*4 or 16 tiles. A conversion is then carried out for each
 * tile (but taking care to pass the full frame stride length to
 * the DMA channel's parameter memory!). IDMA double-buffering is used
 * to convert each tile back-to-back when possible (see note below
 * when double_buffering boolean is set).
 *
 * Note that the input frame must be split up into the same number
 * of tiles as the output frame.
 *
 * FIXME: at this point there is no attempt to deal with visible seams
 * at the tile boundaries when upscaling. The seams are caused by a reset
 * of the bilinear upscale interpolation when starting a new tile. The
 * seams are barely visible for small upscale factors, but become
 * increasingly visible as the upscale factor gets larger, since more
 * interpolated pixels get thrown out at the tile boundaries. A possible
 * fix might be to overlap tiles of different sizes, but this must be done
 * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
 * alignment restrictions of each tile.
 */
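
/*
 * Illustrative example of the tiling scheme described above: converting
 * to a 1920x1080 output exceeds the 1024-pixel resizer limit in both
 * dimensions, so the output frame is split into 2 vertical and 2
 * horizontal stripes, i.e. a 2x2 grid of 960x540 tiles, and the input
 * frame is split into the same 2x2 grid.
 */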
#define MAX_STRIPES_W 4
#define MAX_STRIPES_H 4
#define MAX_TILES (MAX_STRIPES_W * MAX_STRIPES_H)

#define MIN_W 16
#define MIN_H 8
#define MAX_W 4096
#define MAX_H 4096

enum ipu_image_convert_type {
	IMAGE_CONVERT_IN = 0,
	IMAGE_CONVERT_OUT,
};

struct ipu_image_convert_dma_buf {
	void *virt;
	dma_addr_t phys;
	unsigned long len;
};

struct ipu_image_convert_dma_chan {
	int in;
	int out;
	int rot_in;
	int rot_out;
	int vdi_in_p;
	int vdi_in;
	int vdi_in_n;
};

/* dimensions of one tile */
struct ipu_image_tile {
	u32 width;
	u32 height;
	/* size and strides are in bytes */
	u32 size;
	u32 stride;
	u32 rot_stride;
	/* start Y or packed offset of this tile */
	u32 offset;
	/* offset from start to tile in U plane, for planar formats */
	u32 u_off;
	/* offset from start to tile in V plane, for planar formats */
	u32 v_off;
};

struct ipu_image_convert_image {
	struct ipu_image base;
	enum ipu_image_convert_type type;

	const struct ipu_image_pixfmt *fmt;
	unsigned int stride;

	/* # of rows (horizontal stripes) if dest height is > 1024 */
	unsigned int num_rows;
	/* # of columns (vertical stripes) if dest width is > 1024 */
	unsigned int num_cols;

	struct ipu_image_tile tile[MAX_TILES];
};

struct ipu_image_pixfmt {
	u32 fourcc;        /* V4L2 fourcc */
	int bpp;           /* total bpp */
	int uv_width_dec;  /* decimation in width for U/V planes */
	int uv_height_dec; /* decimation in height for U/V planes */
	bool planar;       /* planar format */
	bool uv_swapped;   /* U and V planes are swapped */
	bool uv_packed;    /* partial planar (U and V in same plane) */
};
struct ipu_image_convert_ctx;
struct ipu_image_convert_chan;
struct ipu_image_convert_priv;

struct ipu_image_convert_ctx {
	struct ipu_image_convert_chan *chan;

	ipu_image_convert_cb_t complete;
	void *complete_context;

	/* Source/destination image data and rotation mode */
	struct ipu_image_convert_image in;
	struct ipu_image_convert_image out;
	enum ipu_rotate_mode rot_mode;

	/* intermediate buffer for rotation */
	struct ipu_image_convert_dma_buf rot_intermediate[2];

	/* current buffer number for double buffering */
	int cur_buf_num;

	bool aborting;
	struct completion aborted;

	/* can we use double-buffering for this conversion operation? */
	bool double_buffering;
	/* num_rows * num_cols */
	unsigned int num_tiles;
	/* next tile to process */
	unsigned int next_tile;
	/* where to place converted tile in dest image */
	unsigned int out_tile_map[MAX_TILES];

	struct list_head list;
};

struct ipu_image_convert_chan {
	struct ipu_image_convert_priv *priv;

	enum ipu_ic_task ic_task;
	const struct ipu_image_convert_dma_chan *dma_ch;

	struct ipu_ic *ic;
	struct ipuv3_channel *in_chan;
	struct ipuv3_channel *out_chan;
	struct ipuv3_channel *rotation_in_chan;
	struct ipuv3_channel *rotation_out_chan;

	/* the IPU end-of-frame irqs */
	int out_eof_irq;
	int rot_out_eof_irq;

	spinlock_t irqlock;

	/* list of convert contexts */
	struct list_head ctx_list;
	/* queue of conversion runs */
	struct list_head pending_q;
	/* queue of completed runs */
	struct list_head done_q;

	/* the current conversion run */
	struct ipu_image_convert_run *current_run;
};

struct ipu_image_convert_priv {
	struct ipu_image_convert_chan chan[IC_NUM_TASKS];
	struct ipu_soc *ipu;
};

static const struct ipu_image_convert_dma_chan
image_convert_dma_chan[IC_NUM_TASKS] = {
	[IC_TASK_VIEWFINDER] = {
		.in = IPUV3_CHANNEL_MEM_IC_PRP_VF,
		.out = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out = IPUV3_CHANNEL_ROT_VF_MEM,
		.vdi_in_p = IPUV3_CHANNEL_MEM_VDI_PREV,
		.vdi_in = IPUV3_CHANNEL_MEM_VDI_CUR,
		.vdi_in_n = IPUV3_CHANNEL_MEM_VDI_NEXT,
	},
	[IC_TASK_POST_PROCESSOR] = {
		.in = IPUV3_CHANNEL_MEM_IC_PP,
		.out = IPUV3_CHANNEL_IC_PP_MEM,
		.rot_in = IPUV3_CHANNEL_MEM_ROT_PP,
		.rot_out = IPUV3_CHANNEL_ROT_PP_MEM,
	},
};
static const struct ipu_image_pixfmt image_convert_formats[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565,
		.bpp = 16,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB24,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR24,
		.bpp = 24,
	}, {
		.fourcc = V4L2_PIX_FMT_RGB32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_BGR32,
		.bpp = 32,
	}, {
		.fourcc = V4L2_PIX_FMT_YUYV,
		.bpp = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_UYVY,
		.bpp = 16,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV420,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_swapped = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.bpp = 12,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 2,
		.uv_packed = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV422P,
		.bpp = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
	}, {
		.fourcc = V4L2_PIX_FMT_NV16,
		.bpp = 16,
		.planar = true,
		.uv_width_dec = 2,
		.uv_height_dec = 1,
		.uv_packed = true,
	},
};

static const struct ipu_image_pixfmt *get_format(u32 fourcc)
{
	const struct ipu_image_pixfmt *ret = NULL;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(image_convert_formats); i++) {
		if (image_convert_formats[i].fourcc == fourcc) {
			ret = &image_convert_formats[i];
			break;
		}
	}

	return ret;
}
static void dump_format(struct ipu_image_convert_ctx *ctx,
			struct ipu_image_convert_image *ic_image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev,
		"task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
		chan->ic_task, ctx,
		ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
		ic_image->base.pix.width, ic_image->base.pix.height,
		ic_image->num_cols, ic_image->num_rows,
		ic_image->tile[0].width, ic_image->tile[0].height,
		ic_image->fmt->fourcc & 0xff,
		(ic_image->fmt->fourcc >> 8) & 0xff,
		(ic_image->fmt->fourcc >> 16) & 0xff,
		(ic_image->fmt->fourcc >> 24) & 0xff);
}

int ipu_image_convert_enum_format(int index, u32 *fourcc)
{
	const struct ipu_image_pixfmt *fmt;

	if (index >= (int)ARRAY_SIZE(image_convert_formats))
		return -EINVAL;

	/* Format found */
	fmt = &image_convert_formats[index];
	*fourcc = fmt->fourcc;
	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_enum_format);

static void free_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(priv->ipu->dev,
				  buf->len, buf->virt, buf->phys);
	buf->virt = NULL;
	buf->phys = 0;
}

static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
			 struct ipu_image_convert_dma_buf *buf,
			 int size)
{
	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(priv->ipu->dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (!buf->virt) {
		dev_err(priv->ipu->dev, "failed to alloc dma buffer\n");
		return -ENOMEM;
	}

	return 0;
}
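
/*
 * Worked example for num_stripes() below (illustration only):
 * num_stripes(800) == 1, num_stripes(1920) == 2, num_stripes(3840) == 4.
 * Dimensions above 2048 always get the maximum of four stripes.
 */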
static inline int num_stripes(int dim)
{
	if (dim <= 1024)
		return 1;
	else if (dim <= 2048)
		return 2;
	else
		return 4;
}

static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
				 struct ipu_image_convert_image *image)
{
	int i;

	for (i = 0; i < ctx->num_tiles; i++) {
		struct ipu_image_tile *tile = &image->tile[i];

		tile->height = image->base.pix.height / image->num_rows;
		tile->width = image->base.pix.width / image->num_cols;
		tile->size = ((tile->height * image->fmt->bpp) >> 3) *
			tile->width;

		if (image->fmt->planar) {
			tile->stride = tile->width;
			tile->rot_stride = tile->height;
		} else {
			tile->stride =
				(image->fmt->bpp * tile->width) >> 3;
			tile->rot_stride =
				(image->fmt->bpp * tile->height) >> 3;
		}
	}
}
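
/*
 * Worked example for calc_tile_dimensions() above (illustration only):
 * a 1920x1080 YUV420 image split into a 2x2 grid gives 960x540 tiles.
 * With bpp = 12 each tile occupies ((540 * 12) >> 3) * 960 = 777600
 * bytes, the planar stride is the tile width of 960 bytes, and the
 * rotation stride is 540 bytes.
 */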
/*
 * Use the rotation transformation to find the tile coordinates
 * (row, col) of a tile in the destination frame that corresponds
 * to the given tile coordinates of a source frame. The destination
 * coordinate is then converted to a tile index.
 */
static int transform_tile_index(struct ipu_image_convert_ctx *ctx,
				int src_row, int src_col)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	int dst_row, dst_col;

	/* with no rotation it's a 1:1 mapping */
	if (ctx->rot_mode == IPU_ROTATE_NONE)
		return src_row * s_image->num_cols + src_col;

	/*
	 * before doing the transform, first we have to translate
	 * source row,col for an origin in the center of s_image
	 */
	src_row = src_row * 2 - (s_image->num_rows - 1);
	src_col = src_col * 2 - (s_image->num_cols - 1);

	/* do the rotation transform */
	if (ctx->rot_mode & IPU_ROT_BIT_90) {
		dst_col = -src_row;
		dst_row = src_col;
	} else {
		dst_col = src_col;
		dst_row = src_row;
	}

	/* apply flip */
	if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
		dst_col = -dst_col;
	if (ctx->rot_mode & IPU_ROT_BIT_VFLIP)
		dst_row = -dst_row;

	dev_dbg(priv->ipu->dev, "task %u: ctx %p: [%d,%d] --> [%d,%d]\n",
		chan->ic_task, ctx, src_col, src_row, dst_col, dst_row);

	/*
	 * finally translate dest row,col using an origin in upper
	 * left of d_image
	 */
	dst_row += d_image->num_rows - 1;
	dst_col += d_image->num_cols - 1;
	dst_row /= 2;
	dst_col /= 2;

	return dst_row * d_image->num_cols + dst_col;
}
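
/*
 * Worked example for transform_tile_index() above (illustration only):
 * on a 2x2 grid with only IPU_ROT_BIT_90 set (no flips), source tile
 * [row,col] [0,0] -> [0,1], [0,1] -> [1,1], [1,0] -> [0,0] and
 * [1,1] -> [1,0], i.e. source tile indices 0,1,2,3 map to destination
 * tile indices 1,3,0,2.
 */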
/*
 * Fill the out_tile_map[] with transformed destination tile indices.
 */
static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_image *s_image = &ctx->in;
	unsigned int row, col, tile = 0;

	for (row = 0; row < s_image->num_rows; row++) {
		for (col = 0; col < s_image->num_cols; col++) {
			ctx->out_tile_map[tile] =
				transform_tile_index(ctx, row, col);
			tile++;
		}
	}
}
static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
				     struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 H, w, h, y_stride, uv_stride;
	u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
	u32 y_row_off, y_col_off, y_off;
	u32 y_size, uv_size;

	/* setup some convenience vars */
	H = image->base.pix.height;

	y_stride = image->stride;
	uv_stride = y_stride / fmt->uv_width_dec;
	if (fmt->uv_packed)
		uv_stride *= 2;

	y_size = H * y_stride;
	uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);

	for (row = 0; row < image->num_rows; row++) {
		w = image->tile[tile].width;
		h = image->tile[tile].height;
		y_row_off = row * h * y_stride;
		uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;

		for (col = 0; col < image->num_cols; col++) {
			y_col_off = col * w;
			uv_col_off = y_col_off / fmt->uv_width_dec;
			if (fmt->uv_packed)
				uv_col_off *= 2;

			y_off = y_row_off + y_col_off;
			uv_off = uv_row_off + uv_col_off;

			u_off = y_size - y_off + uv_off;
			v_off = (fmt->uv_packed) ? 0 : u_off + uv_size;
			if (fmt->uv_swapped) {
				tmp = u_off;
				u_off = v_off;
				v_off = tmp;
			}

			image->tile[tile].offset = y_off;
			image->tile[tile].u_off = u_off;
			image->tile[tile++].v_off = v_off;

			dev_dbg(priv->ipu->dev,
				"task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
				chan->ic_task, ctx,
				image->type == IMAGE_CONVERT_IN ?
				"Input" : "Output", row, col,
				y_off, u_off, v_off);
		}
	}
}
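
/*
 * Note on calc_tile_offsets_planar() above (added for clarity): the U/V
 * offsets are measured from the buffer address programmed for the tile,
 * i.e. from y_off. That is why u_off is computed as
 * (y_size - y_off) + uv_off: the distance from the tile's luma start to
 * the end of the Y plane, plus the tile's chroma offset within the U
 * (or interleaved U/V) plane.
 */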
static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
				     struct ipu_image_convert_image *image)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	const struct ipu_image_pixfmt *fmt = image->fmt;
	unsigned int row, col, tile = 0;
	u32 w, h, bpp, stride;
	u32 row_off, col_off;

	/* setup some convenience vars */
	stride = image->stride;
	bpp = fmt->bpp;

	for (row = 0; row < image->num_rows; row++) {
		w = image->tile[tile].width;
		h = image->tile[tile].height;
		row_off = row * h * stride;

		for (col = 0; col < image->num_cols; col++) {
			col_off = (col * w * bpp) >> 3;

			image->tile[tile].offset = row_off + col_off;
			image->tile[tile].u_off = 0;
			image->tile[tile++].v_off = 0;

			dev_dbg(priv->ipu->dev,
				"task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
				chan->ic_task, ctx,
				image->type == IMAGE_CONVERT_IN ?
				"Input" : "Output", row, col,
				row_off + col_off);
		}
	}
}

static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
			      struct ipu_image_convert_image *image)
{
	if (image->fmt->planar)
		calc_tile_offsets_planar(ctx, image);
	else
		calc_tile_offsets_packed(ctx, image);
}
/*
 * return the number of runs in given queue (pending_q or done_q)
 * for this context. hold irqlock when calling.
 */
static int get_run_count(struct ipu_image_convert_ctx *ctx,
			 struct list_head *q)
{
	struct ipu_image_convert_run *run;
	int count = 0;

	lockdep_assert_held(&ctx->chan->irqlock);

	list_for_each_entry(run, q, list) {
		if (run->ctx == ctx)
			count++;
	}

	return count;
}

static void convert_stop(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: stopping ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	/* disable IC tasks and the channels */
	ipu_ic_task_disable(chan->ic);
	ipu_idmac_disable_channel(chan->in_chan);
	ipu_idmac_disable_channel(chan->out_chan);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_disable_channel(chan->rotation_in_chan);
		ipu_idmac_disable_channel(chan->rotation_out_chan);
		ipu_idmac_unlink(chan->out_chan, chan->rotation_in_chan);
	}

	ipu_ic_disable(chan->ic);
}
static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
			       struct ipuv3_channel *channel,
			       struct ipu_image_convert_image *image,
			       enum ipu_rotate_mode rot_mode,
			       bool rot_swap_width_height)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	unsigned int burst_size;
	u32 width, height, stride;
	dma_addr_t addr0, addr1 = 0;
	struct ipu_image tile_image;
	unsigned int tile_idx[2];

	if (image->type == IMAGE_CONVERT_OUT) {
		tile_idx[0] = ctx->out_tile_map[0];
		tile_idx[1] = ctx->out_tile_map[1];
	} else {
		tile_idx[0] = 0;
		tile_idx[1] = 1;
	}

	if (rot_swap_width_height) {
		width = image->tile[0].height;
		height = image->tile[0].width;
		stride = image->tile[0].rot_stride;
		addr0 = ctx->rot_intermediate[0].phys;
		if (ctx->double_buffering)
			addr1 = ctx->rot_intermediate[1].phys;
	} else {
		width = image->tile[0].width;
		height = image->tile[0].height;
		stride = image->stride;
		addr0 = image->base.phys0 +
			image->tile[tile_idx[0]].offset;
		if (ctx->double_buffering)
			addr1 = image->base.phys0 +
				image->tile[tile_idx[1]].offset;
	}

	ipu_cpmem_zero(channel);

	memset(&tile_image, 0, sizeof(tile_image));
	tile_image.pix.width = tile_image.rect.width = width;
	tile_image.pix.height = tile_image.rect.height = height;
	tile_image.pix.bytesperline = stride;
	tile_image.pix.pixelformat = image->fmt->fourcc;
	tile_image.phys0 = addr0;
	tile_image.phys1 = addr1;
	ipu_cpmem_set_image(channel, &tile_image);

	if (image->fmt->planar && !rot_swap_width_height)
		ipu_cpmem_set_uv_offset(channel,
					image->tile[tile_idx[0]].u_off,
					image->tile[tile_idx[0]].v_off);

	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);

	if (channel == chan->rotation_in_chan ||
	    channel == chan->rotation_out_chan) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else
		burst_size = (width % 16) ? 8 : 16;

	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_ic_task_idma_init(chan->ic, channel, width, height,
			      burst_size, rot_mode);

	/*
	 * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
	 * only do this when there is no PRG present.
	 */
	if (!channel->ipu->prg_priv)
		ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
static int convert_start(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	enum ipu_color_space src_cs, dest_cs;
	unsigned int dest_width, dest_height;
	int ret;

	dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
		__func__, chan->ic_task, ctx, run);

	src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
	dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* swap width/height for resizer */
		dest_width = d_image->tile[0].height;
		dest_height = d_image->tile[0].width;
	} else {
		dest_width = d_image->tile[0].width;
		dest_height = d_image->tile[0].height;
	}

	/* setup the IC resizer and CSC */
	ret = ipu_ic_task_init(chan->ic,
			       s_image->tile[0].width,
			       s_image->tile[0].height,
			       dest_width,
			       dest_height,
			       src_cs, dest_cs);
	if (ret) {
		dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}

	/* init the source MEM-->IC PP IDMAC channel */
	init_idmac_channel(ctx, chan->in_chan, s_image,
			   IPU_ROTATE_NONE, false);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* init the IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   IPU_ROTATE_NONE, true);

		/* init the MEM-->IC PP ROT IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
				   ctx->rot_mode, true);

		/* init the destination IC PP ROT-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
				   IPU_ROTATE_NONE, false);

		/* now link IC PP-->MEM to MEM-->IC PP ROT */
		ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
	} else {
		/* init the destination IC PP-->MEM IDMAC channel */
		init_idmac_channel(ctx, chan->out_chan, d_image,
				   ctx->rot_mode, false);
	}

	/* enable the IC */
	ipu_ic_enable(chan->ic);

	/* set buffers ready */
	ipu_idmac_select_buffer(chan->in_chan, 0);
	ipu_idmac_select_buffer(chan->out_chan, 0);
	if (ipu_rot_mode_is_irt(ctx->rot_mode))
		ipu_idmac_select_buffer(chan->rotation_out_chan, 0);
	if (ctx->double_buffering) {
		ipu_idmac_select_buffer(chan->in_chan, 1);
		ipu_idmac_select_buffer(chan->out_chan, 1);
		if (ipu_rot_mode_is_irt(ctx->rot_mode))
			ipu_idmac_select_buffer(chan->rotation_out_chan, 1);
	}

	/* enable the channels! */
	ipu_idmac_enable_channel(chan->in_chan);
	ipu_idmac_enable_channel(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_idmac_enable_channel(chan->rotation_in_chan);
		ipu_idmac_enable_channel(chan->rotation_out_chan);
	}

	ipu_ic_task_enable(chan->ic);

	ipu_cpmem_dump(chan->in_chan);
	ipu_cpmem_dump(chan->out_chan);
	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ipu_cpmem_dump(chan->rotation_in_chan);
		ipu_cpmem_dump(chan->rotation_out_chan);
	}

	ipu_dump(priv->ipu);

	return 0;
}
/* hold irqlock when calling */
static int do_run(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;

	lockdep_assert_held(&chan->irqlock);

	ctx->in.base.phys0 = run->in_phys;
	ctx->out.base.phys0 = run->out_phys;

	ctx->cur_buf_num = 0;
	ctx->next_tile = 1;

	/* remove run from pending_q and set as current */
	list_del(&run->list);
	chan->current_run = run;

	return convert_start(run);
}

/* hold irqlock when calling */
static void run_next(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *tmp;
	int ret;

	lockdep_assert_held(&chan->irqlock);

	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		/* skip contexts that are aborting */
		if (run->ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: skipping aborting ctx %p run %p\n",
				__func__, chan->ic_task, run->ctx, run);
			continue;
		}

		ret = do_run(run);
		if (!ret)
			break;

		/*
		 * something went wrong with start, add the run
		 * to done q and continue to the next run in the
		 * pending q.
		 */
		run->status = ret;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
	}
}
static void empty_done_q(struct ipu_image_convert_chan *chan)
{
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	while (!list_empty(&chan->done_q)) {
		run = list_entry(chan->done_q.next,
				 struct ipu_image_convert_run,
				 list);

		list_del(&run->list);

		dev_dbg(priv->ipu->dev,
			"%s: task %u: completing ctx %p run %p with %d\n",
			__func__, chan->ic_task, run->ctx, run, run->status);

		/* call the completion callback and free the run */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		run->ctx->complete(run, run->ctx->complete_context);
		spin_lock_irqsave(&chan->irqlock, flags);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);
}

/*
 * the bottom half thread clears out the done_q, calling the
 * completion handler for each.
 */
static irqreturn_t do_bh(int irq, void *dev_id)
{
	struct ipu_image_convert_chan *chan = dev_id;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;

	dev_dbg(priv->ipu->dev, "%s: task %u: enter\n", __func__,
		chan->ic_task);

	empty_done_q(chan);

	spin_lock_irqsave(&chan->irqlock, flags);

	/*
	 * the done_q is cleared out, signal any contexts
	 * that are aborting that abort can complete.
	 */
	list_for_each_entry(ctx, &chan->ctx_list, list) {
		if (ctx->aborting) {
			dev_dbg(priv->ipu->dev,
				"%s: task %u: signaling abort for ctx %p\n",
				__func__, chan->ic_task, ctx);
			complete(&ctx->aborted);
		}
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	dev_dbg(priv->ipu->dev, "%s: task %u: exit\n", __func__,
		chan->ic_task);

	return IRQ_HANDLED;
}
/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_ctx *ctx = run->ctx;
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_tile *src_tile, *dst_tile;
	struct ipu_image_convert_image *s_image = &ctx->in;
	struct ipu_image_convert_image *d_image = &ctx->out;
	struct ipuv3_channel *outch;
	unsigned int dst_idx;

	lockdep_assert_held(&chan->irqlock);

	outch = ipu_rot_mode_is_irt(ctx->rot_mode) ?
		chan->rotation_out_chan : chan->out_chan;

	/*
	 * It is difficult to stop the channel DMA before the channels
	 * enter the paused state. Without double-buffering the channels
	 * are always in a paused state when the EOF irq occurs, so it
	 * is safe to stop the channels now. For double-buffering we
	 * just ignore the abort until the operation completes, when it
	 * is safe to shut down.
	 */
	if (ctx->aborting && !ctx->double_buffering) {
		convert_stop(run);
		run->status = -EIO;
		goto done;
	}

	if (ctx->next_tile == ctx->num_tiles) {
		/*
		 * the conversion is complete
		 */
		convert_stop(run);
		run->status = 0;
		goto done;
	}

	/*
	 * not done, place the next tile buffers.
	 */
	if (!ctx->double_buffering) {
		src_tile = &s_image->tile[ctx->next_tile];
		dst_idx = ctx->out_tile_map[ctx->next_tile];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, 0,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, 0,
				     d_image->base.phys0 + dst_tile->offset);
		if (s_image->fmt->planar)
			ipu_cpmem_set_uv_offset(chan->in_chan,
						src_tile->u_off,
						src_tile->v_off);
		if (d_image->fmt->planar)
			ipu_cpmem_set_uv_offset(outch,
						dst_tile->u_off,
						dst_tile->v_off);

		ipu_idmac_select_buffer(chan->in_chan, 0);
		ipu_idmac_select_buffer(outch, 0);
	} else if (ctx->next_tile < ctx->num_tiles - 1) {
		src_tile = &s_image->tile[ctx->next_tile + 1];
		dst_idx = ctx->out_tile_map[ctx->next_tile + 1];
		dst_tile = &d_image->tile[dst_idx];

		ipu_cpmem_set_buffer(chan->in_chan, ctx->cur_buf_num,
				     s_image->base.phys0 + src_tile->offset);
		ipu_cpmem_set_buffer(outch, ctx->cur_buf_num,
				     d_image->base.phys0 + dst_tile->offset);

		ipu_idmac_select_buffer(chan->in_chan, ctx->cur_buf_num);
		ipu_idmac_select_buffer(outch, ctx->cur_buf_num);

		ctx->cur_buf_num ^= 1;
	}

	ctx->next_tile++;
	return IRQ_HANDLED;
done:
	list_add_tail(&run->list, &chan->done_q);
	chan->current_run = NULL;
	run_next(chan);
	return IRQ_WAKE_THREAD;
}
static irqreturn_t norotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this is a rotation operation, just ignore */
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}

static irqreturn_t rotate_irq(int irq, void *data)
{
	struct ipu_image_convert_chan *chan = data;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(&chan->irqlock, flags);

	/* get current run and its context */
	run = chan->current_run;
	if (!run) {
		ret = IRQ_NONE;
		goto out;
	}

	ctx = run->ctx;

	if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
		/* this was NOT a rotation operation, shouldn't happen */
		dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
		spin_unlock_irqrestore(&chan->irqlock, flags);
		return IRQ_HANDLED;
	}

	ret = do_irq(run);
out:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
/*
 * try to force the completion of runs for this ctx. Called when
 * abort wait times out in ipu_image_convert_abort().
 */
static void force_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_run *run;
	unsigned long flags;

	spin_lock_irqsave(&chan->irqlock, flags);

	run = chan->current_run;
	if (run && run->ctx == ctx) {
		convert_stop(run);
		run->status = -EIO;
		list_add_tail(&run->list, &chan->done_q);
		chan->current_run = NULL;
		run_next(chan);
	}

	spin_unlock_irqrestore(&chan->irqlock, flags);

	empty_done_q(chan);
}

static void release_ipu_resources(struct ipu_image_convert_chan *chan)
{
	if (chan->out_eof_irq >= 0)
		free_irq(chan->out_eof_irq, chan);
	if (chan->rot_out_eof_irq >= 0)
		free_irq(chan->rot_out_eof_irq, chan);

	if (!IS_ERR_OR_NULL(chan->in_chan))
		ipu_idmac_put(chan->in_chan);
	if (!IS_ERR_OR_NULL(chan->out_chan))
		ipu_idmac_put(chan->out_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_in_chan))
		ipu_idmac_put(chan->rotation_in_chan);
	if (!IS_ERR_OR_NULL(chan->rotation_out_chan))
		ipu_idmac_put(chan->rotation_out_chan);
	if (!IS_ERR_OR_NULL(chan->ic))
		ipu_ic_put(chan->ic);

	chan->in_chan = chan->out_chan = chan->rotation_in_chan =
		chan->rotation_out_chan = NULL;
	chan->out_eof_irq = chan->rot_out_eof_irq = -1;
}
static int get_ipu_resources(struct ipu_image_convert_chan *chan)
{
	const struct ipu_image_convert_dma_chan *dma = chan->dma_ch;
	struct ipu_image_convert_priv *priv = chan->priv;
	int ret;

	/* get IC */
	chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
	if (IS_ERR(chan->ic)) {
		dev_err(priv->ipu->dev, "could not acquire IC\n");
		ret = PTR_ERR(chan->ic);
		goto err;
	}

	/* get IDMAC channels */
	chan->in_chan = ipu_idmac_get(priv->ipu, dma->in);
	chan->out_chan = ipu_idmac_get(priv->ipu, dma->out);
	if (IS_ERR(chan->in_chan) || IS_ERR(chan->out_chan)) {
		dev_err(priv->ipu->dev, "could not acquire idmac channels\n");
		ret = -EBUSY;
		goto err;
	}

	chan->rotation_in_chan = ipu_idmac_get(priv->ipu, dma->rot_in);
	chan->rotation_out_chan = ipu_idmac_get(priv->ipu, dma->rot_out);
	if (IS_ERR(chan->rotation_in_chan) || IS_ERR(chan->rotation_out_chan)) {
		dev_err(priv->ipu->dev,
			"could not acquire idmac rotation channels\n");
		ret = -EBUSY;
		goto err;
	}

	/* acquire the EOF interrupts */
	chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  chan->out_chan,
						  IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->out_eof_irq);
		chan->out_eof_irq = -1;
		goto err;
	}

	chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
						      chan->rotation_out_chan,
						      IPU_IRQ_EOF);

	ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
				   0, "ipu-ic", chan);
	if (ret < 0) {
		dev_err(priv->ipu->dev, "could not acquire irq %d\n",
			chan->rot_out_eof_irq);
		chan->rot_out_eof_irq = -1;
		goto err;
	}

	return 0;
err:
	release_ipu_resources(chan);
	return ret;
}
static int fill_image(struct ipu_image_convert_ctx *ctx,
		      struct ipu_image_convert_image *ic_image,
		      struct ipu_image *image,
		      enum ipu_image_convert_type type)
{
	struct ipu_image_convert_priv *priv = ctx->chan->priv;

	ic_image->base = *image;
	ic_image->type = type;

	ic_image->fmt = get_format(image->pix.pixelformat);
	if (!ic_image->fmt) {
		dev_err(priv->ipu->dev, "pixelformat not supported for %s\n",
			type == IMAGE_CONVERT_OUT ? "Output" : "Input");
		return -EINVAL;
	}

	if (ic_image->fmt->planar)
		ic_image->stride = ic_image->base.pix.width;
	else
		ic_image->stride = ic_image->base.pix.bytesperline;

	calc_tile_dimensions(ctx, ic_image);
	calc_tile_offsets(ctx, ic_image);

	return 0;
}

/* borrowed from drivers/media/v4l2-core/v4l2-common.c */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}
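
/*
 * Worked example for clamp_align() above (illustration only):
 * clamp_align(1366, MIN_W, MAX_W, 4) first clamps 1366 to the aligned
 * range [16, 4096] and then rounds to the nearest multiple of 2^4 = 16,
 * giving 1360.
 */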
/*
 * We have to adjust the tile width such that the tile physaddrs and
 * U and V plane offsets are multiples of 8 bytes as required by
 * the IPU DMA Controller. For the planar formats, this corresponds
 * to a pixel alignment of 16 (but use a more formal equation since
 * the variables are available). For all the packed formats, 8 is
 * good enough.
 */
static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
{
	return fmt->planar ? 8 * fmt->uv_width_dec : 8;
}
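
/*
 * For example (illustration only): YUV420 has uv_width_dec = 2, so
 * planar tiles get the 16-pixel width alignment, while packed formats
 * such as RGB565 use the 8-pixel alignment.
 */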
/*
 * For tile height alignment, we have to ensure that the output tile
 * heights are multiples of 8 lines if the IRT is required by the
 * given rotation mode (the IRT performs rotations on 8x8 blocks
 * at a time). If the IRT is not used, or for input image tiles,
 * 2 lines are good enough.
 */
static inline u32 tile_height_align(enum ipu_image_convert_type type,
				    enum ipu_rotate_mode rot_mode)
{
	return (type == IMAGE_CONVERT_OUT &&
		ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
}
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
			      enum ipu_rotate_mode rot_mode)
{
	const struct ipu_image_pixfmt *infmt, *outfmt;
	unsigned int num_in_rows, num_in_cols;
	unsigned int num_out_rows, num_out_cols;
	u32 w_align, h_align;

	infmt = get_format(in->pix.pixelformat);
	outfmt = get_format(out->pix.pixelformat);

	/* set some default pixel formats if needed */
	if (!infmt) {
		in->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		infmt = get_format(V4L2_PIX_FMT_RGB24);
	}
	if (!outfmt) {
		out->pix.pixelformat = V4L2_PIX_FMT_RGB24;
		outfmt = get_format(V4L2_PIX_FMT_RGB24);
	}

	/* image converter does not handle fields */
	in->pix.field = out->pix.field = V4L2_FIELD_NONE;

	/* resizer cannot downsize more than 4:1 */
	if (ipu_rot_mode_is_irt(rot_mode)) {
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.width / 4);
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.height / 4);
	} else {
		out->pix.width = max_t(__u32, out->pix.width,
				       in->pix.width / 4);
		out->pix.height = max_t(__u32, out->pix.height,
					in->pix.height / 4);
	}

	/* get tiling rows/cols from output format */
	num_out_rows = num_stripes(out->pix.height);
	num_out_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		num_in_rows = num_out_cols;
		num_in_cols = num_out_rows;
	} else {
		num_in_rows = num_out_rows;
		num_in_cols = num_out_cols;
	}

	/* align input width/height */
	w_align = ilog2(tile_width_align(infmt) * num_in_cols);
	h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
			num_in_rows);
	in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
	in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);

	/* align output width/height */
	w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
	h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
			num_out_rows);
	out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
	out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);

	/* set input/output strides and image sizes */
	in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
	in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
	out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
	out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
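
/*
 * Worked example for ipu_image_convert_adjust() above (illustration
 * only): an RGB24 1366x768 -> 1366x768 conversion with no rotation
 * yields two output columns and one row (1366 > 1024, 768 <= 1024), so
 * widths are aligned to 8 * 2 = 16 pixels and heights to 2 lines; both
 * images are adjusted to 1360x768.
 */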
/*
 * this is used by ipu_image_convert_prepare() to verify that the set
 * input and output images are valid before starting the conversion.
 * Clients can also call it before calling ipu_image_convert_prepare().
 */
int ipu_image_convert_verify(struct ipu_image *in, struct ipu_image *out,
			     enum ipu_rotate_mode rot_mode)
{
	struct ipu_image testin, testout;

	testin = *in;
	testout = *out;

	ipu_image_convert_adjust(&testin, &testout, rot_mode);

	if (testin.pix.width != in->pix.width ||
	    testin.pix.height != in->pix.height ||
	    testout.pix.width != out->pix.width ||
	    testout.pix.height != out->pix.height)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_verify);
/*
 * Call ipu_image_convert_prepare() to prepare for the conversion of
 * given images and rotation mode. Returns a new conversion context.
 */
struct ipu_image_convert_ctx *
ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			  struct ipu_image *in, struct ipu_image *out,
			  enum ipu_rotate_mode rot_mode,
			  ipu_image_convert_cb_t complete,
			  void *complete_context)
{
	struct ipu_image_convert_priv *priv = ipu->image_convert_priv;
	struct ipu_image_convert_image *s_image, *d_image;
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	bool get_res;
	int ret;

	if (!in || !out || !complete ||
	    (ic_task != IC_TASK_VIEWFINDER &&
	     ic_task != IC_TASK_POST_PROCESSOR))
		return ERR_PTR(-EINVAL);

	/* verify the in/out images before continuing */
	ret = ipu_image_convert_verify(in, out, rot_mode);
	if (ret) {
		dev_err(priv->ipu->dev, "%s: in/out formats invalid\n",
			__func__);
		return ERR_PTR(ret);
	}

	chan = &priv->chan[ic_task];

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p\n", __func__,
		chan->ic_task, ctx);

	ctx->chan = chan;
	init_completion(&ctx->aborted);

	s_image = &ctx->in;
	d_image = &ctx->out;

	/* set tiling and rotation */
	d_image->num_rows = num_stripes(out->pix.height);
	d_image->num_cols = num_stripes(out->pix.width);
	if (ipu_rot_mode_is_irt(rot_mode)) {
		s_image->num_rows = d_image->num_cols;
		s_image->num_cols = d_image->num_rows;
	} else {
		s_image->num_rows = d_image->num_rows;
		s_image->num_cols = d_image->num_cols;
	}

	ctx->num_tiles = d_image->num_cols * d_image->num_rows;
	ctx->rot_mode = rot_mode;

	ret = fill_image(ctx, s_image, in, IMAGE_CONVERT_IN);
	if (ret)
		goto out_free;
	ret = fill_image(ctx, d_image, out, IMAGE_CONVERT_OUT);
	if (ret)
		goto out_free;

	calc_out_tile_map(ctx);

	dump_format(ctx, s_image);
	dump_format(ctx, d_image);

	ctx->complete = complete;
	ctx->complete_context = complete_context;

	/*
	 * Can we use double-buffering for this operation? If there is
	 * only one tile (the whole image can be converted in a single
	 * operation) there's no point in using double-buffering. Also,
	 * the IPU's IDMAC channels allow only a single U and V plane
	 * offset shared between both buffers, but these offsets change
	 * for every tile, and therefore would have to be updated for
	 * each buffer which is not possible. So double-buffering is
	 * impossible when either the source or destination images are
	 * a planar format (YUV420, YUV422P, etc.).
	 */
	ctx->double_buffering = (ctx->num_tiles > 1 &&
				 !s_image->fmt->planar &&
				 !d_image->fmt->planar);

	if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
		ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
				    d_image->tile[0].size);
		if (ret)
			goto out_free;
		if (ctx->double_buffering) {
			ret = alloc_dma_buf(priv,
					    &ctx->rot_intermediate[1],
					    d_image->tile[0].size);
			if (ret)
				goto out_free_dmabuf0;
		}
	}

	spin_lock_irqsave(&chan->irqlock, flags);

	get_res = list_empty(&chan->ctx_list);

	list_add_tail(&ctx->list, &chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (get_res) {
		ret = get_ipu_resources(chan);
		if (ret)
			goto out_free_dmabuf1;
	}

	return ctx;

out_free_dmabuf1:
	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	spin_lock_irqsave(&chan->irqlock, flags);
	list_del(&ctx->list);
	spin_unlock_irqrestore(&chan->irqlock, flags);
out_free_dmabuf0:
	free_dma_buf(priv, &ctx->rot_intermediate[0]);
out_free:
	kfree(ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_prepare);
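
/*
 * Illustrative client-side sketch (not part of the original source): a
 * typical asynchronous conversion uses the exported API roughly as
 * follows, where "my_done" and "my_priv" are a hypothetical completion
 * callback and context supplied by the caller, and "run" is a
 * caller-allocated struct ipu_image_convert_run:
 *
 *	ctx = ipu_image_convert_prepare(ipu, IC_TASK_POST_PROCESSOR,
 *					&in, &out, IPU_ROTATE_NONE,
 *					my_done, my_priv);
 *	run->ctx = ctx;
 *	run->in_phys = in.phys0;
 *	run->out_phys = out.phys0;
 *	ret = ipu_image_convert_queue(run);
 *	...
 *	ipu_image_convert_unprepare(ctx);
 *
 * The "canned" helpers ipu_image_convert() and ipu_image_convert_sync()
 * further below wrap essentially this sequence.
 */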
/*
 * Carry out a single image conversion run. Only the physaddrs of the input
 * and output image buffers are needed. The conversion context must have
 * been created previously with ipu_image_convert_prepare().
 */
int ipu_image_convert_queue(struct ipu_image_convert_run *run)
{
	struct ipu_image_convert_chan *chan;
	struct ipu_image_convert_priv *priv;
	struct ipu_image_convert_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	if (!run || !run->ctx || !run->in_phys || !run->out_phys)
		return -EINVAL;

	ctx = run->ctx;
	chan = ctx->chan;
	priv = chan->priv;

	dev_dbg(priv->ipu->dev, "%s: task %u: ctx %p run %p\n", __func__,
		chan->ic_task, ctx, run);

	INIT_LIST_HEAD(&run->list);

	spin_lock_irqsave(&chan->irqlock, flags);

	if (ctx->aborting) {
		ret = -EIO;
		goto unlock;
	}

	list_add_tail(&run->list, &chan->pending_q);

	if (!chan->current_run) {
		ret = do_run(run);
		if (ret)
			chan->current_run = NULL;
	}
unlock:
	spin_unlock_irqrestore(&chan->irqlock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	struct ipu_image_convert_run *run, *active_run, *tmp;
	unsigned long flags;
	int run_count, ret;
	bool need_abort;

	reinit_completion(&ctx->aborted);

	spin_lock_irqsave(&chan->irqlock, flags);

	/* move all remaining pending runs in this context to done_q */
	list_for_each_entry_safe(run, tmp, &chan->pending_q, list) {
		if (run->ctx != ctx)
			continue;
		run->status = -EIO;
		list_move_tail(&run->list, &chan->done_q);
	}

	run_count = get_run_count(ctx, &chan->done_q);
	active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
		chan->current_run : NULL;

	need_abort = (run_count || active_run);

	ctx->aborting = need_abort;

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (!need_abort) {
		dev_dbg(priv->ipu->dev,
			"%s: task %u: no abort needed for ctx %p\n",
			__func__, chan->ic_task, ctx);
		return;
	}

	dev_dbg(priv->ipu->dev,
		"%s: task %u: wait for completion: %d runs, active run %p\n",
		__func__, chan->ic_task, run_count, active_run);

	ret = wait_for_completion_timeout(&ctx->aborted,
					  msecs_to_jiffies(10000));
	if (ret == 0) {
		dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
		force_abort(ctx);
	}

	ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
/* Unprepare image conversion context */
void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
{
	struct ipu_image_convert_chan *chan = ctx->chan;
	struct ipu_image_convert_priv *priv = chan->priv;
	unsigned long flags;
	bool put_res;

	/* make sure no runs are hanging around */
	ipu_image_convert_abort(ctx);

	dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
		chan->ic_task, ctx);

	spin_lock_irqsave(&chan->irqlock, flags);

	list_del(&ctx->list);

	put_res = list_empty(&chan->ctx_list);

	spin_unlock_irqrestore(&chan->irqlock, flags);

	if (put_res)
		release_ipu_resources(chan);

	free_dma_buf(priv, &ctx->rot_intermediate[1]);
	free_dma_buf(priv, &ctx->rot_intermediate[0]);

	kfree(ctx);
}
EXPORT_SYMBOL_GPL(ipu_image_convert_unprepare);
/*
 * "Canned" asynchronous single image conversion. Allocates and returns
 * a new conversion run. On successful return the caller must free the
 * run and call ipu_image_convert_unprepare() after conversion completes.
 */
struct ipu_image_convert_run *
ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
		  struct ipu_image *in, struct ipu_image *out,
		  enum ipu_rotate_mode rot_mode,
		  ipu_image_convert_cb_t complete,
		  void *complete_context)
{
	struct ipu_image_convert_ctx *ctx;
	struct ipu_image_convert_run *run;
	int ret;

	ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode,
					complete, complete_context);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run) {
		ipu_image_convert_unprepare(ctx);
		return ERR_PTR(-ENOMEM);
	}

	run->ctx = ctx;
	run->in_phys = in->phys0;
	run->out_phys = out->phys0;

	ret = ipu_image_convert_queue(run);
	if (ret) {
		ipu_image_convert_unprepare(ctx);
		kfree(run);
		return ERR_PTR(ret);
	}

	return run;
}
EXPORT_SYMBOL_GPL(ipu_image_convert);

/* "Canned" synchronous single image conversion */
static void image_convert_sync_complete(struct ipu_image_convert_run *run,
					void *data)
{
	struct completion *comp = data;

	complete(comp);
}

int ipu_image_convert_sync(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
			   struct ipu_image *in, struct ipu_image *out,
			   enum ipu_rotate_mode rot_mode)
{
	struct ipu_image_convert_run *run;
	struct completion comp;
	int ret;

	init_completion(&comp);

	run = ipu_image_convert(ipu, ic_task, in, out, rot_mode,
				image_convert_sync_complete, &comp);
	if (IS_ERR(run))
		return PTR_ERR(run);

	ret = wait_for_completion_timeout(&comp, msecs_to_jiffies(10000));
	ret = (ret == 0) ? -ETIMEDOUT : 0;

	ipu_image_convert_unprepare(run->ctx);
	kfree(run);

	return ret;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_sync);
int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
{
	struct ipu_image_convert_priv *priv;
	int i;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ipu->image_convert_priv = priv;
	priv->ipu = ipu;

	for (i = 0; i < IC_NUM_TASKS; i++) {
		struct ipu_image_convert_chan *chan = &priv->chan[i];

		chan->ic_task = i;
		chan->priv = priv;
		chan->dma_ch = &image_convert_dma_chan[i];
		chan->out_eof_irq = -1;
		chan->rot_out_eof_irq = -1;

		spin_lock_init(&chan->irqlock);
		INIT_LIST_HEAD(&chan->ctx_list);
		INIT_LIST_HEAD(&chan->pending_q);
		INIT_LIST_HEAD(&chan->done_q);
	}

	return 0;
}

void ipu_image_convert_exit(struct ipu_soc *ipu)
{
}