@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 		if (m2m_dev->m2m_ops->job_abort)
 			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
-		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
 		wait_event(m2m_ctx->finished,
 				!(m2m_ctx->job_flags & TRANS_RUNNING));
 	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -478,8 +478,14 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
+	if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+	    (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+		dprintk("%s: requests cannot be used with capture buffers\n",
+			__func__);
+		return -EPERM;
+	}
 	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
+	if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
 		v4l2_m2m_try_schedule(m2m_ctx);
 
 	return ret;
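
For context: the two checks above enforce the request API contract for mem2mem
devices. Binding a capture buffer to a request now fails with EPERM, and an
output buffer queued with V4L2_BUF_FLAG_REQUEST_FD is only bound to the
request, so job scheduling is deferred until the request itself is queued.
A minimal userspace sketch of that flow (media_fd, video_fd and the buffer
index are assumed to be set up already; the helper name is hypothetical):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/media.h>
	#include <linux/videodev2.h>

	int queue_output_buf_in_request(int media_fd, int video_fd, __u32 index)
	{
		struct v4l2_buffer buf;
		int req_fd;

		/* Allocate a request fd on the media device. */
		if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd))
			return -1;

		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;	/* OUTPUT only, see above */
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = index;
		buf.flags = V4L2_BUF_FLAG_REQUEST_FD;
		buf.request_fd = req_fd;

		/* Binds the buffer to the request; no job is scheduled yet. */
		if (ioctl(video_fd, VIDIOC_QBUF, &buf))
			return -1;

		/* Queuing the request eventually reaches the req_queue op. */
		return ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
	}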
@@ -501,14 +507,9 @@ int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 {
 	struct video_device *vdev = video_devdata(file);
 	struct vb2_queue *vq;
-	int ret;
 
 	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
-	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
-	if (!ret)
-		v4l2_m2m_try_schedule(m2m_ctx);
-
-	return ret;
+	return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
 
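
The prepare_buf simplification above removes a v4l2_m2m_try_schedule() call
that could never do useful work: VIDIOC_PREPARE_BUF only validates and
prepares the buffer's memory, it does not hand the buffer to the driver, so
there is nothing new for the scheduler to pick up at that point. Roughly, in
userspace terms (hypothetical video_fd/buf):

	/* Pre-validates and pins memory only; the buffer is not queued. */
	ioctl(video_fd, VIDIOC_PREPARE_BUF, &buf);

	/* Only QBUF makes the buffer available, so only QBUF may schedule. */
	ioctl(video_fd, VIDIOC_QBUF, &buf);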
@@ -952,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
+void vb2_m2m_request_queue(struct media_request *req)
+{
+	struct media_request_object *obj, *obj_safe;
+	struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+	/*
+	 * Queue all objects. Note that buffer objects are at the end of the
+	 * objects list, after all other object types. Once buffer objects
+	 * are queued, the driver might delete them immediately (if the driver
+	 * processes the buffer at once), so we have to use
+	 * list_for_each_entry_safe() to handle the case where the object we
+	 * queue is deleted.
+	 */
+	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+		struct v4l2_m2m_ctx *m2m_ctx_obj;
+		struct vb2_buffer *vb;
+
+		if (!obj->ops->queue)
+			continue;
+
+		if (vb2_request_object_is_buffer(obj)) {
+			/* Sanity checks */
+			vb = container_of(obj, struct vb2_buffer, req_obj);
+			WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+			m2m_ctx_obj = container_of(vb->vb2_queue,
+						   struct v4l2_m2m_ctx,
+						   out_q_ctx.q);
+			WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+			m2m_ctx = m2m_ctx_obj;
+		}
+
+		/*
+		 * The buffer we queue here can in theory be immediately
+		 * unbound, hence the use of list_for_each_entry_safe()
+		 * above and why we call the queue op last.
+		 */
+		obj->ops->queue(obj);
+	}
+
+	WARN_ON(!m2m_ctx);
+
+	if (m2m_ctx)
+		v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+
 /* Videobuf2 ioctl helpers */
 
 int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
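
The new helper is meant to be plugged into a mem2mem driver's media_device as
its req_queue operation, typically next to the stock videobuf2 request
validator. A minimal sketch of that wiring, assuming an already-initialized
media_device (the my_drv_ names are placeholders, not part of this patch):

	#include <linux/module.h>
	#include <media/media-device.h>
	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-v4l2.h>

	static const struct media_device_ops my_drv_media_ops = {
		.req_validate	= vb2_request_validate,
		.req_queue	= vb2_m2m_request_queue,
	};

	static int my_drv_register_media(struct media_device *mdev)
	{
		/*
		 * Queued requests are routed to vb2_m2m_request_queue(),
		 * which queues every object in the request and then calls
		 * v4l2_m2m_try_schedule() once, on the context derived from
		 * the request's output buffers.
		 */
		mdev->ops = &my_drv_media_ops;
		return media_device_register(mdev);
	}

Note that only the driver's OUTPUT vb2 queue should accept request-bound
buffers (supports_requests), matching both the V4L2_TYPE_IS_OUTPUT check in
v4l2_m2m_qbuf() and the WARN_ON() sanity check in the helper above.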