/*
 * vsp1_video.c -- R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH		1024
#define VSP1_VIDEO_DEF_HEIGHT		768

#define VSP1_VIDEO_MIN_WIDTH		2U
#define VSP1_VIDEO_MAX_WIDTH		8190U
#define VSP1_VIDEO_MIN_HEIGHT		2U
#define VSP1_VIDEO_MAX_HEIGHT		8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}
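
/*
 * Verify that the format configured on the video node matches the active
 * format on the remote subdev pad (media bus code, width and height).
 */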
static int vsp1_video_verify_format(struct vsp1_video *video)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
	    video->rwpf->format.height != fmt.format.height ||
	    video->rwpf->format.width != fmt.format.width)
		return -EINVAL;

	return 0;
}
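
/*
 * Adjust the requested multiplanar pixel format to the hardware constraints
 * and, when fmtinfo is non-NULL, return the matching format information.
 */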
static int __vsp1_video_try_format(struct vsp1_video *video,
				   struct v4l2_pix_format_mplane *pix,
				   const struct vsp1_format_info **fmtinfo)
{
	static const u32 xrgb_formats[][2] = {
		{ V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
		{ V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
		{ V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
		{ V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
	};

	const struct vsp1_format_info *info;
	unsigned int width = pix->width;
	unsigned int height = pix->height;
	unsigned int i;

	/*
	 * Backward compatibility: replace deprecated RGB formats by their XRGB
	 * equivalent. This selects the format older userspace applications want
	 * while still exposing the new format.
	 */
	for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
		if (xrgb_formats[i][0] == pix->pixelformat) {
			pix->pixelformat = xrgb_formats[i][1];
			break;
		}
	}

	/*
	 * Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
	if (info == NULL)
		info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->field = V4L2_FIELD_NONE;

	if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
	    info->fourcc == V4L2_PIX_FMT_HSV32)
		pix->hsv_enc = V4L2_HSV_ENC_256;

	memset(pix->reserved, 0, sizeof(pix->reserved));

	/* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
	width = round_down(width, info->hsub);
	height = round_down(height, info->vsub);

	/* Clamp the width and height. */
	pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
	pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
			    VSP1_VIDEO_MAX_HEIGHT);

	/*
	 * Compute and clamp the stride and image size. While not documented in
	 * the datasheet, strides not aligned to a multiple of 128 bytes result
	 * in image corruption.
	 */
	for (i = 0; i < min(info->planes, 2U); ++i) {
		unsigned int hsub = i > 0 ? info->hsub : 1;
		unsigned int vsub = i > 0 ? info->vsub : 1;
		unsigned int align = 128;
		unsigned int bpl;

		bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
			      pix->width / hsub * info->bpp[i] / 8,
			      round_down(65535U, align));

		pix->plane_fmt[i].bytesperline = round_up(bpl, align);
		pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
					    * pix->height / vsub;
	}

	if (info->planes == 3) {
		/* The second and third planes must have the same stride. */
		pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
		pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
	}

	pix->num_planes = info->planes;

	if (fmtinfo)
		*fmtinfo = info;

	return 0;
}

/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */
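
/*
 * Compute the largest partition width supported by every entity in the
 * pipeline and derive the number of partitions needed to process the full
 * output width.
 */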
static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_entity *entity;
	unsigned int div_size;

	/*
	 * Partitions are computed on the size before rotation; use the format
	 * at the WPF sink.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);
	div_size = format->width;

	/* Gen2 hardware doesn't require image partitioning. */
	if (vsp1->info->gen == 2) {
		pipe->div_size = div_size;
		pipe->partitions = 1;
		return;
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;

		if (entity->ops->max_width) {
			entity_max = entity->ops->max_width(entity, pipe);
			if (entity_max)
				div_size = min(div_size, entity_max);
		}
	}

	pipe->div_size = div_size;
	pipe->partitions = DIV_ROUND_UP(format->width, div_size);
}

/**
 * vsp1_video_partition - Calculate the active partition output window
 *
 * @pipe: the vsp1 pipeline
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 *
 * Returns a v4l2_rect describing the partition window.
 */
static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
					     unsigned int div_size,
					     unsigned int index)
{
	const struct v4l2_mbus_framefmt *format;
	struct v4l2_rect partition;
	unsigned int modulus;

	/*
	 * Partitions are computed on the size before rotation; use the format
	 * at the WPF sink.
	 */
	format = vsp1_entity_get_pad_format(&pipe->output->entity,
					    pipe->output->entity.config,
					    RWPF_PAD_SINK);

	/* A single partition simply processes the output size in full. */
	if (pipe->partitions <= 1) {
		partition.left = 0;
		partition.top = 0;
		partition.width = format->width;
		partition.height = format->height;
		return partition;
	}

	/* Initialise the partition with sane starting conditions. */
	partition.left = index * div_size;
	partition.top = 0;
	partition.width = div_size;
	partition.height = format->height;

	modulus = format->width % div_size;

	/*
	 * We need to prevent the last partition from being smaller than the
	 * *minimum* width of the hardware capabilities.
	 *
	 * If the modulus is less than half of the partition size, the
	 * penultimate partition is reduced to half, and that half is added
	 * to the final partition: |1234|1234|1234|12|341|
	 * to prevent this:        |1234|1234|1234|1234|1|.
	 */
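	/*
	 * For example, with a div_size of 256 and an 800 pixel wide output,
	 * the four partitions become 256, 256, 128 and 160 pixels wide
	 * instead of 256, 256, 256 and 32 pixels.
	 */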
	if (modulus) {
		/*
		 * pipe->partitions is 1 based, whilst index is a 0 based index.
		 * Normalise this locally.
		 */
		unsigned int partitions = pipe->partitions - 1;

		if (modulus < div_size / 2) {
			if (index == partitions - 1) {
				/* Halve the penultimate partition. */
				partition.width = div_size / 2;
			} else if (index == partitions) {
				/* Increase the final partition. */
				partition.width = (div_size / 2) + modulus;
				partition.left -= div_size / 2;
			}
		} else if (index == partitions) {
			partition.width = modulus;
		}
	}

	return partition;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, then hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *next = NULL;
	struct vsp1_vb2_buffer *done;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	done = list_first_entry(&video->irqqueue,
				struct vsp1_vb2_buffer, queue);

	/* In DU output mode reuse the buffer if the list is singular. */
	if (pipe->lif && list_is_singular(&video->irqqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return done;
	}

	list_del(&done->queue);

	if (!list_empty(&video->irqqueue))
		next = list_first_entry(&video->irqqueue,
					struct vsp1_vb2_buffer, queue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	done->buf.sequence = pipe->sequence;
	done->buf.vb2_buf.timestamp = ktime_get_ns();
	for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
		vb2_set_plane_payload(&done->buf.vb2_buf, i,
				      vb2_plane_size(&done->buf.vb2_buf, i));
	vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

	return next;
}
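
/*
 * Complete the current buffer on the video node and, when another buffer is
 * available, set its memory up for the next frame and flag the node as having
 * a buffer ready.
 */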
static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
				 struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video = rwpf->video;
	struct vsp1_vb2_buffer *buf;

	buf = vsp1_video_complete_buffer(video);
	if (buf == NULL)
		return;

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;
}
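
/*
 * Compute the window for the current partition and apply the partition
 * parameters to every entity in the pipeline through the given display list.
 */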
static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
					      struct vsp1_dl_list *dl)
{
	struct vsp1_entity *entity;

	pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
					       pipe->current_partition);

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, dl,
					       VSP1_ENTITY_PARAMS_PARTITION);
	}
}

static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;

	if (!pipe->dl)
		pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

	/*
	 * Start with the runtime parameters as the configure operation can
	 * compute/cache information needed when configuring partitions. This
	 * is the case with flipping in the WPF.
	 */
	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_RUNTIME);
	}

	/* Run the first partition */
	pipe->current_partition = 0;
	vsp1_video_pipeline_run_partition(pipe, pipe->dl);

	/* Process consecutive partitions as necessary */
	for (pipe->current_partition = 1;
	     pipe->current_partition < pipe->partitions;
	     pipe->current_partition++) {
		struct vsp1_dl_list *dl;

		/*
		 * Partition configuration operations will utilise
		 * the pipe->current_partition variable to determine
		 * the work they should complete.
		 */
		dl = vsp1_dl_list_get(pipe->output->dlm);

		/*
		 * An incomplete chain will still function, but output only
		 * the partitions that had a dl available. The frame end
		 * interrupt will be marked on the last dl in the chain.
		 */
		if (!dl) {
			dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
			break;
		}

		vsp1_video_pipeline_run_partition(pipe, dl);
		vsp1_dl_list_add_chain(pipe->dl, dl);
	}

	/* Complete, and commit the head display list. */
	vsp1_dl_list_commit(pipe->dl);
	pipe->dl = NULL;

	vsp1_pipeline_run(pipe);
}

static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	enum vsp1_pipeline_state state;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&pipe->irqlock, flags);

	/* Complete buffers on all video nodes. */
	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		vsp1_video_frame_end(pipe, pipe->inputs[i]);
	}

	vsp1_video_frame_end(pipe, pipe->output);

	state = pipe->state;
	pipe->state = VSP1_PIPELINE_STOPPED;

	/*
	 * If a stop has been requested, mark the pipeline as stopped and
	 * return. Otherwise restart the pipeline if ready.
	 */
	if (state == VSP1_PIPELINE_STOPPING)
		wake_up(&pipe->wq);
	else if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
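
/*
 * Follow the source links from an input RPF to the output WPF, recording the
 * BRU input and UDS usage along the way, and make sure the branch contains no
 * loop and terminates at the expected WPF.
 */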
static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
					    struct vsp1_rwpf *input,
					    struct vsp1_rwpf *output)
{
	struct media_entity_enum ent_enum;
	struct vsp1_entity *entity;
	struct media_pad *pad;
	bool bru_found = false;
	int ret;

	ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
	if (ret < 0)
		return ret;

	/*
	 * The main data path doesn't include the HGO or HGT; use
	 * vsp1_entity_remote_pad() to traverse the graph.
	 */
	pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

	while (1) {
		if (pad == NULL) {
			ret = -EPIPE;
			goto out;
		}

		/* We've reached a video node, that shouldn't have happened. */
		if (!is_media_entity_v4l2_subdev(pad->entity)) {
			ret = -EPIPE;
			goto out;
		}

		entity = to_vsp1_entity(
			media_entity_to_v4l2_subdev(pad->entity));

		/*
		 * If a BRU is present in the pipeline, store the BRU input pad
		 * number in the input RPF for use when configuring the RPF.
		 */
		if (entity->type == VSP1_ENTITY_BRU) {
			struct vsp1_bru *bru = to_bru(&entity->subdev);

			bru->inputs[pad->index].rpf = input;
			input->bru_input = pad->index;

			bru_found = true;
		}

		/* We've reached the WPF, we're done. */
		if (entity->type == VSP1_ENTITY_WPF)
			break;

		/* Ensure the branch has no loop. */
		if (media_entity_enum_test_and_set(&ent_enum,
						   &entity->subdev.entity)) {
			ret = -EPIPE;
			goto out;
		}

		/* UDS can't be chained. */
		if (entity->type == VSP1_ENTITY_UDS) {
			if (pipe->uds) {
				ret = -EPIPE;
				goto out;
			}

			pipe->uds = entity;
			pipe->uds_input = bru_found ? pipe->bru
					: &input->entity;
		}

		/* Follow the source link, ignoring any HGO or HGT. */
		pad = &entity->pads[entity->source_pad];
		pad = vsp1_entity_remote_pad(pad);
	}

	/* The last entity must be the output WPF. */
	if (entity != &output->entity)
		ret = -EPIPE;

out:
	media_entity_enum_cleanup(&ent_enum);

	return ret;
}

static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
				     struct vsp1_video *video)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int i;
	int ret;

	/* Walk the graph to locate the entities and video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret)
		return ret;

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct v4l2_subdev *subdev;
		struct vsp1_rwpf *rwpf;
		struct vsp1_entity *e;

		if (!is_media_entity_v4l2_subdev(entity))
			continue;

		subdev = media_entity_to_v4l2_subdev(entity);
		e = to_vsp1_entity(subdev);
		list_add_tail(&e->list_pipe, &pipe->entities);

		if (e->type == VSP1_ENTITY_RPF) {
			rwpf = to_rwpf(subdev);
			pipe->inputs[rwpf->entity.index] = rwpf;
			rwpf->video->pipe_index = ++pipe->num_inputs;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_WPF) {
			rwpf = to_rwpf(subdev);
			pipe->output = rwpf;
			rwpf->video->pipe_index = 0;
			rwpf->pipe = pipe;
		} else if (e->type == VSP1_ENTITY_LIF) {
			pipe->lif = e;
		} else if (e->type == VSP1_ENTITY_BRU) {
			pipe->bru = e;
		} else if (e->type == VSP1_ENTITY_HGO) {
			struct vsp1_hgo *hgo = to_hgo(subdev);

			pipe->hgo = e;
			hgo->histo.pipe = pipe;
		} else if (e->type == VSP1_ENTITY_HGT) {
			struct vsp1_hgt *hgt = to_hgt(subdev);

			pipe->hgt = e;
			hgt->histo.pipe = pipe;
		}
	}

	media_graph_walk_cleanup(&graph);

	/* We need one output and at least one input. */
	if (pipe->num_inputs == 0 || !pipe->output)
		return -EPIPE;

	/*
	 * Follow links downstream for each input and make sure the graph
	 * contains no loop and that all branches end at the output WPF.
	 */
	for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
		if (!pipe->inputs[i])
			continue;

		ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
						       pipe->output);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
				    struct vsp1_video *video)
{
	vsp1_pipeline_init(pipe);

	pipe->frame_end = vsp1_video_pipeline_frame_end;

	return vsp1_video_pipeline_build(pipe, video);
}

static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
	struct vsp1_pipeline *pipe;
	int ret;

	/*
	 * Get a pipeline object for the video node. If a pipeline has already
	 * been allocated just increment its reference count and return it.
	 * Otherwise allocate a new pipeline and initialize it; it will be freed
	 * when the last reference is released.
	 */
	if (!video->rwpf->pipe) {
		pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
		if (!pipe)
			return ERR_PTR(-ENOMEM);

		ret = vsp1_video_pipeline_init(pipe, video);
		if (ret < 0) {
			vsp1_pipeline_reset(pipe);
			kfree(pipe);
			return ERR_PTR(ret);
		}
	} else {
		pipe = video->rwpf->pipe;
		kref_get(&pipe->kref);
	}

	return pipe;
}

static void vsp1_video_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

	vsp1_pipeline_reset(pipe);
	kfree(pipe);
}

static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
	struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

	mutex_lock(&mdev->graph_mutex);
	kref_put(&pipe->kref, vsp1_video_pipeline_release);
	mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */
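
/*
 * If *nplanes is non-zero the caller supplies a plane layout (as for
 * VIDIOC_CREATE_BUFS); validate it against the active format. Otherwise report
 * the number of planes and the minimum size of each plane.
 */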
static int
vsp1_video_queue_setup(struct vb2_queue *vq,
		       unsigned int *nbuffers, unsigned int *nplanes,
		       unsigned int sizes[], struct device *alloc_devs[])
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (*nplanes) {
		if (*nplanes != format->num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++)
			if (sizes[i] < format->plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = format->num_planes;

	for (i = 0; i < format->num_planes; ++i)
		sizes[i] = format->plane_fmt[i].sizeimage;

	return 0;
}

static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
	unsigned int i;

	if (vb->num_planes < format->num_planes)
		return -EINVAL;

	for (i = 0; i < vb->num_planes; ++i) {
		buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

		if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
			return -EINVAL;
	}

	/* Clear the DMA addresses of the unused planes. */
	for ( ; i < 3; ++i)
		buf->mem.addr[i] = 0;

	return 0;
}

static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->irqlock, flags);
	empty = list_empty(&video->irqqueue);
	list_add_tail(&buf->queue, &video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);

	if (!empty)
		return;

	spin_lock_irqsave(&pipe->irqlock, flags);

	video->rwpf->mem = buf->mem;
	pipe->buffers_ready |= 1 << video->pipe_index;

	if (vb2_is_streaming(&video->queue) &&
	    vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);

	spin_unlock_irqrestore(&pipe->irqlock, flags);
}
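
/*
 * Set up the pipeline before the first frame: compute the partition sizes,
 * prepare the head display list, configure alpha scaling for the UDS if
 * present, and apply the initialization parameters to every entity.
 */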
static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;

	/* Determine this pipeline's sizes for image partitioning support. */
	vsp1_video_pipeline_setup_partitions(pipe);

	/* Prepare the display list. */
	pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!pipe->dl)
		return -ENOMEM;

	if (pipe->uds) {
		struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

		/*
		 * If a BRU is present in the pipeline before the UDS, the alpha
		 * component doesn't need to be scaled as the BRU output alpha
		 * value is fixed to 255. Otherwise we need to scale the alpha
		 * component only when available at the input RPF.
		 */
		if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
			uds->scale_alpha = false;
		} else {
			struct vsp1_rwpf *rpf =
				to_rwpf(&pipe->uds_input->subdev);

			uds->scale_alpha = rpf->fmtinfo->alpha;
		}
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		vsp1_entity_route_setup(entity, pipe, pipe->dl);

		if (entity->ops->configure)
			entity->ops->configure(entity, pipe, pipe->dl,
					       VSP1_ENTITY_PARAMS_INIT);
	}

	return 0;
}

static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	bool start_pipeline = false;
	unsigned long flags;
	int ret;

	mutex_lock(&pipe->lock);
	if (pipe->stream_count == pipe->num_inputs) {
		ret = vsp1_video_setup_pipeline(pipe);
		if (ret < 0) {
			mutex_unlock(&pipe->lock);
			return ret;
		}

		start_pipeline = true;
	}

	pipe->stream_count++;
	mutex_unlock(&pipe->lock);

	/*
	 * vsp1_pipeline_ready() is not sufficient to establish that all streams
	 * are prepared and the pipeline is configured, as multiple streams
	 * can race through streamon with buffers already queued. Therefore we
	 * don't even attempt to start the pipeline until the last stream has
	 * called through here.
	 */
	if (!start_pipeline)
		return 0;

	spin_lock_irqsave(&pipe->irqlock, flags);
	if (vsp1_pipeline_ready(pipe))
		vsp1_video_pipeline_run(pipe);
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return 0;
}

static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_video *video = vb2_get_drv_priv(vq);
	struct vsp1_pipeline *pipe = video->rwpf->pipe;
	struct vsp1_vb2_buffer *buffer;
	unsigned long flags;
	int ret;

	/*
	 * Clear the buffers ready flag to make sure the device won't be started
	 * by a QBUF on the video node on the other side of the pipeline.
	 */
	spin_lock_irqsave(&video->irqlock, flags);
	pipe->buffers_ready &= ~(1 << video->pipe_index);
	spin_unlock_irqrestore(&video->irqlock, flags);

	mutex_lock(&pipe->lock);
	if (--pipe->stream_count == pipe->num_inputs) {
		/* Stop the pipeline. */
		ret = vsp1_pipeline_stop(pipe);
		if (ret == -ETIMEDOUT)
			dev_err(video->vsp1->dev, "pipeline stop timeout\n");

		vsp1_dl_list_put(pipe->dl);
		pipe->dl = NULL;
	}
	mutex_unlock(&pipe->lock);

	media_pipeline_stop(&video->video.entity);
	vsp1_video_pipeline_put(pipe);

	/* Remove all buffers from the IRQ queue. */
	spin_lock_irqsave(&video->irqlock, flags);
	list_for_each_entry(buffer, &video->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&video->irqqueue);
	spin_unlock_irqrestore(&video->irqlock, flags);
}

static const struct vb2_ops vsp1_video_queue_qops = {
	.queue_setup = vsp1_video_queue_setup,
	.buf_prepare = vsp1_video_buffer_prepare,
	.buf_queue = vsp1_video_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = vsp1_video_start_streaming,
	.stop_streaming = vsp1_video_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
				 | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
				 | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(video->vsp1->dev));

	return 0;
}

static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	mutex_lock(&video->lock);
	format->fmt.pix_mp = video->rwpf->format;
	mutex_unlock(&video->lock);

	return 0;
}

static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);

	if (format->type != video->queue.type)
		return -EINVAL;

	return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	const struct vsp1_format_info *info;
	int ret;

	if (format->type != video->queue.type)
		return -EINVAL;

	ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
	if (ret < 0)
		return ret;

	mutex_lock(&video->lock);

	if (vb2_is_busy(&video->queue)) {
		ret = -EBUSY;
		goto done;
	}

	video->rwpf->format = format->fmt.pix_mp;
	video->rwpf->fmtinfo = info;

done:
	mutex_unlock(&video->lock);
	return ret;
}

static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_video *video = to_vsp1_video(vfh->vdev);
	struct media_device *mdev = &video->vsp1->media_dev;
	struct vsp1_pipeline *pipe;
	int ret;

	if (video->queue.owner && video->queue.owner != file->private_data)
		return -EBUSY;

	/*
	 * Get a pipeline for the video node and start streaming on it. No link
	 * touching an entity in the pipeline can be activated or deactivated
	 * once streaming is started.
	 */
	mutex_lock(&mdev->graph_mutex);

	pipe = vsp1_video_pipeline_get(video);
	if (IS_ERR(pipe)) {
		mutex_unlock(&mdev->graph_mutex);
		return PTR_ERR(pipe);
	}

	ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0) {
		mutex_unlock(&mdev->graph_mutex);
		goto err_pipe;
	}

	mutex_unlock(&mdev->graph_mutex);

	/*
	 * Verify that the configured format matches the output of the connected
	 * subdev.
	 */
	ret = vsp1_video_verify_format(video);
	if (ret < 0)
		goto err_stop;

	/* Start the queue. */
	ret = vb2_streamon(&video->queue, type);
	if (ret < 0)
		goto err_stop;

	return 0;

err_stop:
	media_pipeline_stop(&video->video.entity);
err_pipe:
	vsp1_video_pipeline_put(pipe);
	return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
	.vidioc_querycap = vsp1_video_querycap,
	.vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
	.vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
	.vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
	.vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
	.vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
	.vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vsp1_video_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static int vsp1_video_open(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh;
	int ret = 0;

	vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
	if (vfh == NULL)
		return -ENOMEM;

	v4l2_fh_init(vfh, &video->video);
	v4l2_fh_add(vfh);

	file->private_data = vfh;

	ret = vsp1_device_get(video->vsp1);
	if (ret < 0) {
		v4l2_fh_del(vfh);
		v4l2_fh_exit(vfh);
		kfree(vfh);
	}

	return ret;
}

static int vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;

	mutex_lock(&video->lock);
	if (video->queue.owner == vfh) {
		vb2_queue_release(&video->queue);
		video->queue.owner = NULL;
	}
	mutex_unlock(&video->lock);

	vsp1_device_put(video->vsp1);

	v4l2_fh_release(file);

	file->private_data = NULL;

	return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */
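
/*
 * Create the video device node associated with an RPF or WPF entity, set up
 * its default format and vb2 queue, and register it with the V4L2 core.
 */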
struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
				     struct vsp1_rwpf *rwpf)
{
	struct vsp1_video *video;
	const char *direction;
	int ret;

	video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
	if (!video)
		return ERR_PTR(-ENOMEM);

	rwpf->video = video;

	video->vsp1 = vsp1;
	video->rwpf = rwpf;

	if (rwpf->entity.type == VSP1_ENTITY_RPF) {
		direction = "input";
		video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		video->video.vfl_dir = VFL_DIR_TX;
	} else {
		direction = "output";
		video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
		video->pad.flags = MEDIA_PAD_FL_SINK;
		video->video.vfl_dir = VFL_DIR_RX;
	}

	mutex_init(&video->lock);
	spin_lock_init(&video->irqlock);
	INIT_LIST_HEAD(&video->irqqueue);

	/* Initialize the media entity... */
	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ERR_PTR(ret);

	/* ... and the format ... */
	rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
	rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
	rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;

	__vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

	/* ... and the video node... */
	video->video.v4l2_dev = &video->vsp1->v4l2_dev;
	video->video.fops = &vsp1_video_fops;
	snprintf(video->video.name, sizeof(video->video.name), "%s %s",
		 rwpf->entity.subdev.name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &vsp1_video_ioctl_ops;

	video_set_drvdata(&video->video, video);

	video->queue.type = video->type;
	video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	video->queue.lock = &video->lock;
	video->queue.drv_priv = video;
	video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
	video->queue.ops = &vsp1_video_queue_qops;
	video->queue.mem_ops = &vb2_dma_contig_memops;
	video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	video->queue.dev = video->vsp1->dev;
	ret = vb2_queue_init(&video->queue);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	video->video.queue = &video->queue;
	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(video->vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return video;

error:
	vsp1_video_cleanup(video);
	return ERR_PTR(ret);
}

void vsp1_video_cleanup(struct vsp1_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);

	media_entity_cleanup(&video->video.entity);
}