author     Linus Torvalds <torvalds@linux-foundation.org>  2018-10-31 20:53:29 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-31 20:53:29 +0300
commit     b3491d8430dd25f0a4e00c33d60da22a9bd9d052 (patch)
tree       a3506967c871971bf9e5920dca5316c5346763df /drivers
parent     59fc453b21f767f2fb0ff4dc0a947e9b9c9e6d14 (diff)
parent     e4183d3256e3cd668e899d06af66da5aac3a51af (diff)
download   linux-b3491d8430dd25f0a4e00c33d60da22a9bd9d052.tar.xz
Merge tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media
Pull new experimental media request API from Mauro Carvalho Chehab:
 "A new media request API

  This API is needed to support device drivers that can dynamically
  change their parameters for each new frame. The latest versions of the
  Google camera and codec HALs depend on such a feature.

  At this stage, it supports only stateless codecs.

  It has been discussed for a long time (at least over the last 3-4
  years), and we finally reached something that seems to work.

  This series contains both the API and the core changes required to
  support it, and a new m2m decoder driver (cedrus).

  As the current API is still experimental, the only real driver using
  it (cedrus) was added in staging [1]. We intend to keep it there for a
  while, in order to test the API. Only when we're sure that this API
  works for other cases (like encoders) will we move this driver out of
  staging and set the API in stone.

  [1] We also added request support to the vivid virtual driver (used
      only for testing), as that makes it easier to test the API for
      those who don't have the cedrus hardware"

* tag 'media/v4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media: (53 commits)
  media: dt-bindings: Document the Rockchip VPU bindings
  media: platform: Add Cedrus VPU decoder driver
  media: dt-bindings: media: Document bindings for the Cedrus VPU driver
  media: v4l: Add definition for the Sunxi tiled NV12 format
  media: v4l: Add definitions for MPEG-2 slice format and metadata
  media: videobuf2-core: Rework and rename helper for request buffer count
  media: v4l2-ctrls.c: initialize an error return code with zero
  media: v4l2-compat-ioctl32.c: add missing documentation for a field
  media: media-request: update documentation
  media: media-request: EPERM -> EACCES/EBUSY
  media: v4l2-ctrls: improve media_request_(un)lock_for_update
  media: v4l2-ctrls: use media_request_(un)lock_for_access
  media: media-request: add media_request_(un)lock_for_access
  media: vb2: set reqbufs/create_bufs capabilities
  media: videodev2.h: add new capabilities for buffer types
  media: buffer.rst: only set V4L2_BUF_FLAG_REQUEST_FD for QBUF
  media: v4l2-ctrls: return -EACCES if request wasn't completed
  media: media-request: return -EINVAL for invalid request_fds
  media: vivid: add request support
  media: vivid: add mc
  ...
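For orientation, the flow the new API expects from userspace looks roughly like the sketch below. It is pieced together from the ioctls, flags and fields added in this pull (MEDIA_IOC_REQUEST_ALLOC, MEDIA_REQUEST_IOC_QUEUE/REINIT, V4L2_BUF_FLAG_REQUEST_FD, the request_fd field in v4l2_buffer); the control-setting step via V4L2_CTRL_WHICH_REQUEST_VAL comes from the v4l2-ctrls patches in the same series, and the control, queue type and buffer index used here are placeholders, not taken from this diff.

    /*
     * Userspace sketch of one request cycle; error handling trimmed and
     * the control, queue type and buffer index are placeholders.
     */
    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>
    #include <linux/videodev2.h>

    static int queue_one_frame(int media_fd, int video_fd, unsigned int buf_index)
    {
        int request_fd;
        struct v4l2_ext_control ctrl = {
            .id = V4L2_CID_BRIGHTNESS,      /* placeholder control */
            .value = 128,
        };
        struct v4l2_ext_controls ctrls = {
            .which = V4L2_CTRL_WHICH_REQUEST_VAL,
            .count = 1,
            .controls = &ctrl,
        };
        struct v4l2_buffer buf = {
            .index = buf_index,
            .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
            .memory = V4L2_MEMORY_MMAP,
            .flags = V4L2_BUF_FLAG_REQUEST_FD,
        };
        struct pollfd pfd = { .events = POLLPRI };

        /* 1. Allocate a request on the media device. */
        if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &request_fd))
            return -1;

        /* 2. Set the per-frame controls against the request, not the device. */
        ctrls.request_fd = request_fd;
        ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);

        /* 3. QBUF only binds the buffer to the request (IN_REQUEST state). */
        buf.request_fd = request_fd;
        ioctl(video_fd, VIDIOC_QBUF, &buf);

        /* 4. Validate and queue the request as a whole. */
        ioctl(request_fd, MEDIA_REQUEST_IOC_QUEUE);

        /* 5. Completion is signalled with POLLPRI; REINIT allows reuse. */
        pfd.fd = request_fd;
        poll(&pfd, 1, -1);
        return ioctl(request_fd, MEDIA_REQUEST_IOC_REINIT);
    }

Requests are reusable: after the POLLPRI completion, MEDIA_REQUEST_IOC_REINIT returns the request to the idle state so the same fd can describe the next frame.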
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/media/Makefile | 3
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c | 260
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c | 528
-rw-r--r--  drivers/media/dvb-core/dvb_vb2.c | 5
-rw-r--r--  drivers/media/dvb-frontends/rtl2832_sdr.c | 5
-rw-r--r--  drivers/media/media-device.c | 24
-rw-r--r--  drivers/media/media-request.c | 501
-rw-r--r--  drivers/media/pci/bt8xx/bttv-driver.c | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-417.c | 2
-rw-r--r--  drivers/media/pci/cx88/cx88-blackbird.c | 2
-rw-r--r--  drivers/media/pci/cx88/cx88-video.c | 2
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 4
-rw-r--r--  drivers/media/pci/saa7134/saa7134-video.c | 2
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-capture.c | 2
-rw-r--r--  drivers/media/platform/omap3isp/ispvideo.c | 4
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-core.c | 2
-rw-r--r--  drivers/media/platform/rcar_drif.c | 2
-rw-r--r--  drivers/media/platform/s3c-camif/camif-capture.c | 4
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | 4
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 4
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera.c | 7
-rw-r--r--  drivers/media/platform/vim2m.c | 50
-rw-r--r--  drivers/media/platform/vivid/vivid-core.c | 74
-rw-r--r--  drivers/media/platform/vivid/vivid-core.h | 8
-rw-r--r--  drivers/media/platform/vivid/vivid-ctrls.c | 46
-rw-r--r--  drivers/media/platform/vivid/vivid-kthread-cap.c | 12
-rw-r--r--  drivers/media/platform/vivid/vivid-kthread-out.c | 12
-rw-r--r--  drivers/media/platform/vivid/vivid-sdr-cap.c | 16
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-cap.c | 10
-rw-r--r--  drivers/media/platform/vivid/vivid-vbi-out.c | 10
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 10
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-out.c | 10
-rw-r--r--  drivers/media/usb/cpia2/cpia2_v4l.c | 2
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-417.c | 2
-rw-r--r--  drivers/media/usb/cx231xx/cx231xx-video.c | 4
-rw-r--r--  drivers/media/usb/msi2500/msi2500.c | 2
-rw-r--r--  drivers/media/usb/tm6000/tm6000-video.c | 2
-rw-r--r--  drivers/media/usb/uvc/uvc_queue.c | 5
-rw-r--r--  drivers/media/usb/uvc/uvc_v4l2.c | 3
-rw-r--r--  drivers/media/usb/uvc/uvcvideo.h | 1
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 19
-rw-r--r--  drivers/media/v4l2-core/v4l2-ctrls.c | 612
-rw-r--r--  drivers/media/v4l2-core/v4l2-dev.c | 18
-rw-r--r--  drivers/media/v4l2-core/v4l2-device.c | 3
-rw-r--r--  drivers/media/v4l2-core/v4l2-ioctl.c | 50
-rw-r--r--  drivers/media/v4l2-core/v4l2-mem2mem.c | 67
-rw-r--r--  drivers/media/v4l2-core/v4l2-subdev.c | 9
-rw-r--r--  drivers/staging/media/Kconfig | 2
-rw-r--r--  drivers/staging/media/Makefile | 1
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 7
-rw-r--r--  drivers/staging/media/imx/imx-media-dev.c | 2
-rw-r--r--  drivers/staging/media/imx/imx-media-fim.c | 2
-rw-r--r--  drivers/staging/media/omap4iss/iss_video.c | 3
-rw-r--r--  drivers/staging/media/sunxi/Kconfig | 15
-rw-r--r--  drivers/staging/media/sunxi/Makefile | 1
-rw-r--r--  drivers/staging/media/sunxi/cedrus/Kconfig | 14
-rw-r--r--  drivers/staging/media/sunxi/cedrus/Makefile | 3
-rw-r--r--  drivers/staging/media/sunxi/cedrus/TODO | 7
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus.c | 431
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus.h | 167
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_dec.c | 70
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_dec.h | 27
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_hw.c | 327
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_hw.h | 30
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c | 246
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_regs.h | 235
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_video.c | 542
-rw-r--r--  drivers/staging/media/sunxi/cedrus/cedrus_video.h | 30
-rw-r--r--  drivers/usb/gadget/function/uvc_queue.c | 2
69 files changed, 4245 insertions, 345 deletions
diff --git a/drivers/media/Makefile b/drivers/media/Makefile
index 594b462ddf0e..985d35ec6b29 100644
--- a/drivers/media/Makefile
+++ b/drivers/media/Makefile
@@ -3,7 +3,8 @@
# Makefile for the kernel multimedia device drivers.
#
-media-objs := media-device.o media-devnode.o media-entity.o
+media-objs := media-device.o media-devnode.o media-entity.o \
+ media-request.o
#
# I2C drivers should come before other drivers, otherwise they'll fail
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index d6d22cf77066..975ff5669f72 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -356,6 +356,8 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
}
+ call_void_bufop(q, init_buffer, vb);
+
q->bufs[vb->index] = vb;
/* Allocate video buffer memory for the MMAP type */
@@ -497,8 +499,9 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
vb->cnt_buf_init, vb->cnt_buf_cleanup,
vb->cnt_buf_prepare, vb->cnt_buf_finish);
- pr_info(" buf_queue: %u buf_done: %u\n",
- vb->cnt_buf_queue, vb->cnt_buf_done);
+ pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n",
+ vb->cnt_buf_queue, vb->cnt_buf_done,
+ vb->cnt_buf_request_complete);
pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
vb->cnt_mem_alloc, vb->cnt_mem_put,
vb->cnt_mem_prepare, vb->cnt_mem_finish,
@@ -683,7 +686,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
/*
- * Call queue_cancel to clean up any buffers in the PREPARED or
+ * Call queue_cancel to clean up any buffers in the
* QUEUED state which is possible if buffers were prepared or
* queued without ever calling STREAMON.
*/
@@ -930,6 +933,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
/* sync buffers */
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+ vb->synced = false;
}
spin_lock_irqsave(&q->done_lock, flags);
@@ -942,6 +946,14 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
vb->state = state;
}
atomic_dec(&q->owned_by_drv_count);
+
+ if (vb->req_obj.req) {
+ /* This is not supported at the moment */
+ WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+
spin_unlock_irqrestore(&q->done_lock, flags);
trace_vb2_buf_done(q, vb);
@@ -976,20 +988,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
/*
* __prepare_mmap() - prepare an MMAP buffer
*/
-static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
+static int __prepare_mmap(struct vb2_buffer *vb)
{
int ret = 0;
- if (pb)
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, vb->planes);
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, vb->planes);
return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
/*
* __prepare_userptr() - prepare a USERPTR buffer
*/
-static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
+static int __prepare_userptr(struct vb2_buffer *vb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1000,12 +1011,10 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb) {
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, planes);
- if (ret)
- return ret;
- }
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
/* Skip the plane if already verified */
@@ -1105,7 +1114,7 @@ err:
/*
* __prepare_dmabuf() - prepare a DMABUF buffer
*/
-static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
+static int __prepare_dmabuf(struct vb2_buffer *vb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1116,12 +1125,10 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
/* Copy relevant information provided by the userspace */
- if (pb) {
- ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
- vb, pb, planes);
- if (ret)
- return ret;
- }
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
for (plane = 0; plane < vb->num_planes; ++plane) {
struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
@@ -1250,9 +1257,10 @@ static void __enqueue_in_driver(struct vb2_buffer *vb)
call_void_vb_qop(vb, buf_queue, vb);
}
-static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
+static int __buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
+ enum vb2_buffer_state orig_state = vb->state;
unsigned int plane;
int ret;
@@ -1261,26 +1269,31 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
return -EIO;
}
+ if (vb->prepared)
+ return 0;
+ WARN_ON(vb->synced);
+
vb->state = VB2_BUF_STATE_PREPARING;
switch (q->memory) {
case VB2_MEMORY_MMAP:
- ret = __prepare_mmap(vb, pb);
+ ret = __prepare_mmap(vb);
break;
case VB2_MEMORY_USERPTR:
- ret = __prepare_userptr(vb, pb);
+ ret = __prepare_userptr(vb);
break;
case VB2_MEMORY_DMABUF:
- ret = __prepare_dmabuf(vb, pb);
+ ret = __prepare_dmabuf(vb);
break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
+ break;
}
if (ret) {
dprintk(1, "buffer preparation failed: %d\n", ret);
- vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->state = orig_state;
return ret;
}
@@ -1288,11 +1301,98 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
- vb->state = VB2_BUF_STATE_PREPARED;
+ vb->synced = true;
+ vb->prepared = true;
+ vb->state = orig_state;
return 0;
}
+static int vb2_req_prepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int ret;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
+ return -EINVAL;
+
+ mutex_lock(vb->vb2_queue->lock);
+ ret = __buf_prepare(vb);
+ mutex_unlock(vb->vb2_queue->lock);
+ return ret;
+}
+
+static void __vb2_dqbuf(struct vb2_buffer *vb);
+
+static void vb2_req_unprepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ mutex_lock(vb->vb2_queue->lock);
+ __vb2_dqbuf(vb);
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+ mutex_unlock(vb->vb2_queue->lock);
+ WARN_ON(!vb->req_obj.req);
+}
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req);
+
+static void vb2_req_queue(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ mutex_lock(vb->vb2_queue->lock);
+ vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ mutex_unlock(vb->vb2_queue->lock);
+}
+
+static void vb2_req_unbind(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ call_void_bufop(vb->vb2_queue, init_buffer, vb);
+}
+
+static void vb2_req_release(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+}
+
+static const struct media_request_object_ops vb2_core_req_ops = {
+ .prepare = vb2_req_prepare,
+ .unprepare = vb2_req_unprepare,
+ .queue = vb2_req_queue,
+ .unbind = vb2_req_unbind,
+ .release = vb2_req_release,
+};
+
+bool vb2_request_object_is_buffer(struct media_request_object *obj)
+{
+ return obj->ops == &vb2_core_req_ops;
+}
+EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);
+
+unsigned int vb2_request_buffer_cnt(struct media_request *req)
+{
+ struct media_request_object *obj;
+ unsigned long flags;
+ unsigned int buffer_cnt = 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list)
+ if (vb2_request_object_is_buffer(obj))
+ buffer_cnt++;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return buffer_cnt;
+}
+EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);
+
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
struct vb2_buffer *vb;
@@ -1304,8 +1404,12 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
vb->state);
return -EINVAL;
}
+ if (vb->prepared) {
+ dprintk(1, "buffer already prepared\n");
+ return -EINVAL;
+ }
- ret = __buf_prepare(vb, pb);
+ ret = __buf_prepare(vb);
if (ret)
return ret;
@@ -1314,7 +1418,7 @@ int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
dprintk(2, "prepare of buffer %d succeeded\n", vb->index);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
@@ -1381,7 +1485,8 @@ static int vb2_start_streaming(struct vb2_queue *q)
return ret;
}
-int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req)
{
struct vb2_buffer *vb;
int ret;
@@ -1393,13 +1498,57 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
vb = q->bufs[index];
- switch (vb->state) {
- case VB2_BUF_STATE_DEQUEUED:
- ret = __buf_prepare(vb, pb);
+ if ((req && q->uses_qbuf) ||
+ (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
+ q->uses_requests)) {
+ dprintk(1, "queue in wrong mode (qbuf vs requests)\n");
+ return -EBUSY;
+ }
+
+ if (req) {
+ int ret;
+
+ q->uses_requests = 1;
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "buffer %d not in dequeued state\n",
+ vb->index);
+ return -EINVAL;
+ }
+
+ media_request_object_init(&vb->req_obj);
+
+ /* Make sure the request is in a safe state for updating. */
+ ret = media_request_lock_for_update(req);
if (ret)
return ret;
- break;
- case VB2_BUF_STATE_PREPARED:
+ ret = media_request_object_bind(req, &vb2_core_req_ops,
+ q, true, &vb->req_obj);
+ media_request_unlock_for_update(req);
+ if (ret)
+ return ret;
+
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+ /* Fill buffer information for the userspace */
+ if (pb) {
+ call_void_bufop(q, copy_timestamp, vb, pb);
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+ }
+
+ dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
+ return 0;
+ }
+
+ if (vb->state != VB2_BUF_STATE_IN_REQUEST)
+ q->uses_qbuf = 1;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_IN_REQUEST:
+ if (!vb->prepared) {
+ ret = __buf_prepare(vb);
+ if (ret)
+ return ret;
+ }
break;
case VB2_BUF_STATE_PREPARING:
dprintk(1, "buffer still being prepared\n");
@@ -1600,6 +1749,11 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
+ if (vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ call_void_bufop(q, init_buffer, vb);
}
int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
@@ -1625,6 +1779,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
}
call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = false;
if (pindex)
*pindex = vb->index;
@@ -1688,6 +1843,8 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
q->start_streaming_called = 0;
q->queued_count = 0;
q->error = 0;
+ q->uses_requests = 0;
+ q->uses_qbuf = 0;
/*
* Remove all buffers from videobuf's list...
@@ -1712,19 +1869,38 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
*/
for (i = 0; i < q->num_buffers; ++i) {
struct vb2_buffer *vb = q->bufs[i];
+ struct media_request *req = vb->req_obj.req;
+
+ /*
+ * If a request is associated with this buffer, then
+ * call buf_request_complete() to give the driver a chance to
+ * complete() the related request objects. Otherwise those objects would
+ * never complete.
+ */
+ if (req) {
+ enum media_request_state state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
- if (vb->state == VB2_BUF_STATE_PREPARED ||
- vb->state == VB2_BUF_STATE_QUEUED) {
+ if (state == MEDIA_REQUEST_STATE_QUEUED)
+ call_void_vb_qop(vb, buf_request_complete, vb);
+ }
+
+ if (vb->synced) {
unsigned int plane;
for (plane = 0; plane < vb->num_planes; ++plane)
call_void_memop(vb, finish,
vb->planes[plane].mem_priv);
+ vb->synced = false;
}
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- vb->state = VB2_BUF_STATE_PREPARED;
+ if (vb->prepared) {
call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = false;
}
__vb2_dqbuf(vb);
}
@@ -2281,7 +2457,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
* Queue all buffers.
*/
for (i = 0; i < q->num_buffers; i++) {
- ret = vb2_core_qbuf(q, i, NULL);
+ ret = vb2_core_qbuf(q, i, NULL, NULL);
if (ret)
goto err_reqbufs;
fileio->bufs[i].queued = 1;
@@ -2460,7 +2636,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
if (copy_timestamp)
b->timestamp = ktime_get_ns();
- ret = vb2_core_qbuf(q, index, NULL);
+ ret = vb2_core_qbuf(q, index, NULL, NULL);
dprintk(5, "vb2_dbuf result: %d\n", ret);
if (ret)
return ret;
@@ -2563,7 +2739,7 @@ static int vb2_thread(void *data)
if (copy_timestamp)
vb->timestamp = ktime_get_ns();
if (!threadio->stop)
- ret = vb2_core_qbuf(q, vb->index, NULL);
+ ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
call_void_qop(q, wait_prepare, q);
if (ret || threadio->stop)
break;
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 886a2d8d5c6c..a17033ab2c22 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
@@ -40,10 +41,12 @@ module_param(debug, int, 0644);
pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \
} while (0)
-/* Flags that are set by the vb2 core */
+/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_IN_REQUEST | \
+ V4L2_BUF_FLAG_REQUEST_FD | \
V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
@@ -118,6 +121,16 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return 0;
}
+/*
+ * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct
+ */
+static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->request_fd = -1;
+}
+
static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
const struct v4l2_buffer *b = pb;
@@ -154,9 +167,181 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
pr_warn("use the actual size instead.\n");
}
-static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
- const char *opname)
+static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_plane *planes = vbuf->planes;
+ unsigned int plane;
+ int ret;
+
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+ return -EINVAL;
+ }
+ vbuf->sequence = 0;
+ vbuf->request_fd = -1;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ case VB2_MEMORY_DMABUF:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ default:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.offset =
+ vb->planes[plane].m.offset;
+ planes[plane].length =
+ vb->planes[plane].length;
+ }
+ break;
+ }
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when API starts
+ * accepting variable number of planes.
+ *
+ * If bytesused == 0 for the output buffer, then fall
+ * back to the full buffer size. In that case
+ * userspace clearly never bothered to set it and
+ * it's a safe assumption that they really meant to
+ * use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct vb2_plane *pdst = &planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ if (psrc->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
+ pdst->data_offset = psrc->data_offset;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use planes array,
+ * so fill in relevant v4l2_buffer struct fields instead.
+ * In videobuf we use our internal V4l2_planes struct for
+ * single-planar buffers as well, for simplicity.
+ *
+ * If bytesused == 0 for the output buffer, then fall back
+ * to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
+ */
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ planes[0].m.userptr = b->m.userptr;
+ planes[0].length = b->length;
+ break;
+ case VB2_MEMORY_DMABUF:
+ planes[0].m.fd = b->m.fd;
+ planes[0].length = b->length;
+ break;
+ default:
+ planes[0].m.offset = vb->planes[0].m.offset;
+ planes[0].length = vb->planes[0].length;
+ break;
+ }
+
+ planes[0].data_offset = 0;
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (b->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ planes[0].bytesused = b->bytesused;
+ else
+ planes[0].bytesused = b->bytesused ?
+ b->bytesused : planes[0].length;
+ } else
+ planes[0].bytesused = 0;
+
+ }
+
+ /* Zero flags that we handle */
+ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vbuf->field = b->field;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ /* Zero last flag, this is a signal from driver to userspace */
+ vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
+ }
+
+ return 0;
+}
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b,
+ const char *opname,
+ struct media_request **p_req)
+{
+ struct media_request *req;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ int ret;
+
if (b->type != q->type) {
dprintk(1, "%s: invalid buffer type\n", opname);
return -EINVAL;
@@ -178,7 +363,82 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
return -EINVAL;
}
- return __verify_planes_array(q->bufs[b->index], b);
+ vb = q->bufs[b->index];
+ vbuf = to_vb2_v4l2_buffer(vb);
+ ret = __verify_planes_array(vb, b);
+ if (ret)
+ return ret;
+
+ if (!vb->prepared) {
+ /* Copy relevant information provided by the userspace */
+ memset(vbuf->planes, 0,
+ sizeof(vbuf->planes[0]) * vb->num_planes);
+ ret = vb2_fill_vb2_v4l2_buffer(vb, b);
+ if (ret)
+ return ret;
+ }
+
+ if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ if (q->uses_requests) {
+ dprintk(1, "%s: queue uses requests\n", opname);
+ return -EBUSY;
+ }
+ return 0;
+ } else if (!q->supports_requests) {
+ dprintk(1, "%s: queue does not support requests\n", opname);
+ return -EACCES;
+ } else if (q->uses_qbuf) {
+ dprintk(1, "%s: queue does not use requests\n", opname);
+ return -EBUSY;
+ }
+
+ /*
+ * For proper locking when queueing a request you need to be able
+ * to lock access to the vb2 queue, so check that there is a lock
+ * that we can use. In addition p_req must be non-NULL.
+ */
+ if (WARN_ON(!q->lock || !p_req))
+ return -EINVAL;
+
+ /*
+ * Make sure this op is implemented by the driver. It's easy to forget
+ * this callback, but is it important when canceling a buffer in a
+ * queued request.
+ */
+ if (WARN_ON(!q->ops->buf_request_complete))
+ return -EINVAL;
+
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s: buffer is not in dequeued state\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->request_fd < 0) {
+ dprintk(1, "%s: request_fd < 0\n", opname);
+ return -EINVAL;
+ }
+
+ req = media_request_get_by_fd(mdev, b->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(1, "%s: invalid request_fd\n", opname);
+ return PTR_ERR(req);
+ }
+
+ /*
+ * Early sanity check. This is checked again when the buffer
+ * is bound to the request in vb2_core_qbuf().
+ */
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_UPDATING) {
+ dprintk(1, "%s: request is not idle\n", opname);
+ media_request_put(req);
+ return -EBUSY;
+ }
+
+ *p_req = req;
+ vbuf->request_fd = b->request_fd;
+
+ return 0;
}
/*
@@ -204,7 +464,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
b->timecode = vbuf->timecode;
b->sequence = vbuf->sequence;
b->reserved2 = 0;
- b->reserved = 0;
+ b->request_fd = 0;
if (q->is_multiplanar) {
/*
@@ -261,15 +521,15 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
case VB2_BUF_STATE_ACTIVE:
b->flags |= V4L2_BUF_FLAG_QUEUED;
break;
+ case VB2_BUF_STATE_IN_REQUEST:
+ b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
+ break;
case VB2_BUF_STATE_ERROR:
b->flags |= V4L2_BUF_FLAG_ERROR;
/* fall through */
case VB2_BUF_STATE_DONE:
b->flags |= V4L2_BUF_FLAG_DONE;
break;
- case VB2_BUF_STATE_PREPARED:
- b->flags |= V4L2_BUF_FLAG_PREPARED;
- break;
case VB2_BUF_STATE_PREPARING:
case VB2_BUF_STATE_DEQUEUED:
case VB2_BUF_STATE_REQUEUEING:
@@ -277,8 +537,17 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
break;
}
+ if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
+ vb->state == VB2_BUF_STATE_IN_REQUEST) &&
+ vb->synced && vb->prepared)
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+
if (vb2_buffer_in_use(q, vb))
b->flags |= V4L2_BUF_FLAG_MAPPED;
+ if (vbuf->request_fd >= 0) {
+ b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
+ b->request_fd = vbuf->request_fd;
+ }
if (!q->is_output &&
b->flags & V4L2_BUF_FLAG_DONE &&
@@ -291,158 +560,28 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
* v4l2_buffer by the userspace. It also verifies that struct
* v4l2_buffer has a valid number of planes.
*/
-static int __fill_vb2_buffer(struct vb2_buffer *vb,
- const void *pb, struct vb2_plane *planes)
+static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
- struct vb2_queue *q = vb->vb2_queue;
- const struct v4l2_buffer *b = pb;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
unsigned int plane;
- int ret;
-
- ret = __verify_length(vb, b);
- if (ret < 0) {
- dprintk(1, "plane parameters verification failed: %d\n", ret);
- return ret;
- }
- if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
- /*
- * If the format's field is ALTERNATE, then the buffer's field
- * should be either TOP or BOTTOM, not ALTERNATE since that
- * makes no sense. The driver has to know whether the
- * buffer represents a top or a bottom field in order to
- * program any DMA correctly. Using ALTERNATE is wrong, since
- * that just says that it is either a top or a bottom field,
- * but not which of the two it is.
- */
- dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
- return -EINVAL;
- }
- vb->timestamp = 0;
- vbuf->sequence = 0;
- if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
- if (b->memory == VB2_MEMORY_USERPTR) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- planes[plane].m.userptr =
- b->m.planes[plane].m.userptr;
- planes[plane].length =
- b->m.planes[plane].length;
- }
- }
- if (b->memory == VB2_MEMORY_DMABUF) {
- for (plane = 0; plane < vb->num_planes; ++plane) {
- planes[plane].m.fd =
- b->m.planes[plane].m.fd;
- planes[plane].length =
- b->m.planes[plane].length;
- }
- }
-
- /* Fill in driver-provided information for OUTPUT types */
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Will have to go up to b->length when API starts
- * accepting variable number of planes.
- *
- * If bytesused == 0 for the output buffer, then fall
- * back to the full buffer size. In that case
- * userspace clearly never bothered to set it and
- * it's a safe assumption that they really meant to
- * use the full plane sizes.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0
- * as a way to indicate that streaming is finished.
- * In that case, the driver should use the
- * allow_zero_bytesused flag to keep old userspace
- * applications working.
- */
- for (plane = 0; plane < vb->num_planes; ++plane) {
- struct vb2_plane *pdst = &planes[plane];
- struct v4l2_plane *psrc = &b->m.planes[plane];
-
- if (psrc->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- pdst->bytesused = psrc->bytesused;
- else
- pdst->bytesused = psrc->bytesused ?
- psrc->bytesused : pdst->length;
- pdst->data_offset = psrc->data_offset;
- }
- }
- } else {
- /*
- * Single-planar buffers do not use planes array,
- * so fill in relevant v4l2_buffer struct fields instead.
- * In videobuf we use our internal V4l2_planes struct for
- * single-planar buffers as well, for simplicity.
- *
- * If bytesused == 0 for the output buffer, then fall back
- * to the full buffer size as that's a sensible default.
- *
- * Some drivers, e.g. old codec drivers, use bytesused == 0 as
- * a way to indicate that streaming is finished. In that case,
- * the driver should use the allow_zero_bytesused flag to keep
- * old userspace applications working.
- */
- if (b->memory == VB2_MEMORY_USERPTR) {
- planes[0].m.userptr = b->m.userptr;
- planes[0].length = b->length;
- }
+ if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp)
+ vb->timestamp = 0;
- if (b->memory == VB2_MEMORY_DMABUF) {
- planes[0].m.fd = b->m.fd;
- planes[0].length = b->length;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
+ planes[plane].m = vbuf->planes[plane].m;
+ planes[plane].length = vbuf->planes[plane].length;
}
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- if (b->bytesused == 0)
- vb2_warn_zero_bytesused(vb);
-
- if (vb->vb2_queue->allow_zero_bytesused)
- planes[0].bytesused = b->bytesused;
- else
- planes[0].bytesused = b->bytesused ?
- b->bytesused : planes[0].length;
- } else
- planes[0].bytesused = 0;
-
- }
-
- /* Zero flags that the vb2 core handles */
- vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
- if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * Non-COPY timestamps and non-OUTPUT queues will get
- * their timestamp and timestamp source flags from the
- * queue.
- */
- vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ planes[plane].bytesused = vbuf->planes[plane].bytesused;
+ planes[plane].data_offset = vbuf->planes[plane].data_offset;
}
-
- if (V4L2_TYPE_IS_OUTPUT(b->type)) {
- /*
- * For output buffers mask out the timecode flag:
- * this will be handled later in vb2_qbuf().
- * The 'field' is valid metadata for this output buffer
- * and so that needs to be copied here.
- */
- vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
- vbuf->field = b->field;
- } else {
- /* Zero any output buffer flags as this is a capture buffer */
- vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
- /* Zero last flag, this is a signal from driver to userspace */
- vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
- }
-
return 0;
}
static const struct vb2_buf_ops v4l2_buf_ops = {
.verify_planes_array = __verify_planes_array_core,
+ .init_buffer = __init_v4l2_vb2_buffer,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
@@ -483,15 +622,30 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
EXPORT_SYMBOL(vb2_querybuf);
+static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+{
+ *caps = 0;
+ if (q->io_modes & VB2_MMAP)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
+ if (q->io_modes & VB2_USERPTR)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
+ if (q->io_modes & VB2_DMABUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->supports_requests)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+}
+
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ fill_buf_caps(q, &req->capabilities);
return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
{
int ret;
@@ -500,7 +654,10 @@ int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
+ if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
+ return -EINVAL;
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
@@ -514,6 +671,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
int ret = vb2_verify_memory_type(q, create->memory, f->type);
unsigned i;
+ fill_buf_caps(q, &create->capabilities);
create->index = q->num_buffers;
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -560,8 +718,10 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
{
+ struct media_request *req = NULL;
int ret;
if (vb2_fileio_is_active(q)) {
@@ -569,8 +729,13 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
- return ret ? ret : vb2_core_qbuf(q, b->index, b);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+ if (ret)
+ return ret;
+ ret = vb2_core_qbuf(q, b->index, b, req);
+ if (req)
+ media_request_put(req);
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
@@ -714,6 +879,7 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ fill_buf_caps(vdev->queue, &p->capabilities);
if (res)
return res;
if (vb2_queue_is_busy(vdev, file))
@@ -735,6 +901,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
p->format.type);
p->index = vdev->queue->num_buffers;
+ fill_buf_caps(vdev->queue, &p->capabilities);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
@@ -760,7 +927,7 @@ int vb2_ioctl_prepare_buf(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- return vb2_prepare_buf(vdev->queue, p);
+ return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
@@ -779,7 +946,7 @@ int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- return vb2_qbuf(vdev->queue, p);
+ return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
@@ -961,6 +1128,57 @@ void vb2_ops_wait_finish(struct vb2_queue *vq)
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
+/*
+ * Note that this function is called during validation time and
+ * thus the req_queue_mutex is held to ensure no request objects
+ * can be added or deleted while validating. So there is no need
+ * to protect the objects list.
+ */
+int vb2_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ int ret = 0;
+
+ if (!vb2_request_buffer_cnt(req))
+ return -ENOENT;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ if (!obj->ops->prepare)
+ continue;
+
+ ret = obj->ops->prepare(obj);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ list_for_each_entry_continue_reverse(obj, &req->objects, list)
+ if (obj->ops->unprepare)
+ obj->ops->unprepare(obj);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_request_validate);
+
+void vb2_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
+ if (obj->ops->queue)
+ obj->ops->queue(obj);
+}
+EXPORT_SYMBOL_GPL(vb2_request_queue);
+
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
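The fill_buf_caps() helper added above is how a queue now advertises the new capability bits through the (formerly reserved) capabilities field of VIDIOC_REQBUFS and VIDIOC_CREATE_BUFS. A minimal userspace probe, assuming an already-open video node with a capture queue and no buffers currently allocated, might look like the sketch below; the function name is hypothetical.

    /*
     * Probe for request support via the capabilities field that
     * fill_buf_caps() populates. Note that a zero-count REQBUFS also
     * frees any previously allocated buffers.
     */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int queue_supports_requests(int video_fd)
    {
        struct v4l2_requestbuffers reqbufs;

        memset(&reqbufs, 0, sizeof(reqbufs));
        reqbufs.count = 0;
        reqbufs.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        reqbufs.memory = V4L2_MEMORY_MMAP;

        if (ioctl(video_fd, VIDIOC_REQBUFS, &reqbufs))
            return 0;

        return !!(reqbufs.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
    }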
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index c90b1fd94735..6974f1731529 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -146,8 +146,7 @@ static void _fill_dmx_buffer(struct vb2_buffer *vb, void *pb)
dprintk(3, "[%s]\n", ctx->name);
}
-static int _fill_vb2_buffer(struct vb2_buffer *vb,
- const void *pb, struct vb2_plane *planes)
+static int _fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
@@ -385,7 +384,7 @@ int dvb_vb2_qbuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b)
{
int ret;
- ret = vb2_core_qbuf(&ctx->vb_q, b->index, b);
+ ret = vb2_core_qbuf(&ctx->vb_q, b->index, b, NULL);
if (ret) {
dprintk(1, "[%s] index=%d errno=%d\n", ctx->name,
b->index, ret);
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 8ef91b1598e3..d6673f4fb47b 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -1394,7 +1394,8 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
case RTL2832_SDR_TUNER_E4000:
v4l2_ctrl_handler_init(&dev->hdl, 9);
if (subdev)
- v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler,
+ NULL, true);
break;
case RTL2832_SDR_TUNER_R820T:
case RTL2832_SDR_TUNER_R828D:
@@ -1423,7 +1424,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
v4l2_ctrl_handler_init(&dev->hdl, 2);
if (subdev)
v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler,
- NULL);
+ NULL, true);
break;
default:
v4l2_ctrl_handler_init(&dev->hdl, 0);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 4c7190db420e..bed24372e61f 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -30,6 +30,7 @@
#include <media/media-device.h>
#include <media/media-devnode.h>
#include <media/media-entity.h>
+#include <media/media-request.h>
#ifdef CONFIG_MEDIA_CONTROLLER
@@ -377,10 +378,19 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
return ret;
}
+static long media_device_request_alloc(struct media_device *mdev,
+ int *alloc_fd)
+{
+ if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return -ENOTTY;
+
+ return media_request_alloc(mdev, alloc_fd);
+}
+
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
{
- /* All media IOCTLs are _IOWR() */
- if (copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
+ if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
+ copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
@@ -388,8 +398,8 @@ static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
{
- /* All media IOCTLs are _IOWR() */
- if (copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
+ if ((_IOC_DIR(cmd) & _IOC_READ) &&
+ copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
return -EFAULT;
return 0;
@@ -425,6 +435,7 @@ static const struct media_ioctl_info ioctl_info[] = {
MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0),
};
static long media_device_ioctl(struct file *filp, unsigned int cmd,
@@ -691,9 +702,13 @@ void media_device_init(struct media_device *mdev)
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
INIT_LIST_HEAD(&mdev->entity_notify);
+
+ mutex_init(&mdev->req_queue_mutex);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
+ atomic_set(&mdev->request_id, 0);
+
dev_dbg(mdev->dev, "Media device initialized\n");
}
EXPORT_SYMBOL_GPL(media_device_init);
@@ -704,6 +719,7 @@ void media_device_cleanup(struct media_device *mdev)
mdev->entity_internal_idx_max = 0;
media_graph_walk_cleanup(&mdev->pm_count_walk);
mutex_destroy(&mdev->graph_mutex);
+ mutex_destroy(&mdev->req_queue_mutex);
}
EXPORT_SYMBOL_GPL(media_device_cleanup);
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
new file mode 100644
index 000000000000..4e9db1fed697
--- /dev/null
+++ b/drivers/media/media-request.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Media device request objects
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/refcount.h>
+
+#include <media/media-device.h>
+#include <media/media-request.h>
+
+static const char * const request_state[] = {
+ [MEDIA_REQUEST_STATE_IDLE] = "idle",
+ [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
+ [MEDIA_REQUEST_STATE_QUEUED] = "queued",
+ [MEDIA_REQUEST_STATE_COMPLETE] = "complete",
+ [MEDIA_REQUEST_STATE_CLEANING] = "cleaning",
+ [MEDIA_REQUEST_STATE_UPDATING] = "updating",
+};
+
+static const char *
+media_request_state_str(enum media_request_state state)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
+
+ if (WARN_ON(state >= ARRAY_SIZE(request_state)))
+ return "invalid";
+ return request_state[state];
+}
+
+static void media_request_clean(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /* Just a sanity check. No other code path is allowed to change this. */
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
+ WARN_ON(req->updating_count);
+ WARN_ON(req->access_count);
+
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ media_request_object_unbind(obj);
+ media_request_object_put(obj);
+ }
+
+ req->updating_count = 0;
+ req->access_count = 0;
+ WARN_ON(req->num_incomplete_objects);
+ req->num_incomplete_objects = 0;
+ wake_up_interruptible_all(&req->poll_wait);
+}
+
+static void media_request_release(struct kref *kref)
+{
+ struct media_request *req =
+ container_of(kref, struct media_request, kref);
+ struct media_device *mdev = req->mdev;
+
+ dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
+
+ /* No other users, no need for a spinlock */
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+
+ media_request_clean(req);
+
+ if (mdev->ops->req_free)
+ mdev->ops->req_free(req);
+ else
+ kfree(req);
+}
+
+void media_request_put(struct media_request *req)
+{
+ kref_put(&req->kref, media_request_release);
+}
+EXPORT_SYMBOL_GPL(media_request_put);
+
+static int media_request_close(struct inode *inode, struct file *filp)
+{
+ struct media_request *req = filp->private_data;
+
+ media_request_put(req);
+ return 0;
+}
+
+static __poll_t media_request_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct media_request *req = filp->private_data;
+ unsigned long flags;
+ __poll_t ret = 0;
+
+ if (!(poll_requested_events(wait) & EPOLLPRI))
+ return 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
+ ret = EPOLLPRI;
+ goto unlock;
+ }
+ if (req->state != MEDIA_REQUEST_STATE_QUEUED) {
+ ret = EPOLLERR;
+ goto unlock;
+ }
+
+ poll_wait(filp, &req->poll_wait, wait);
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
+
+static long media_request_ioctl_queue(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ enum media_request_state state;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
+
+ /*
+ * Ensure the request that is validated will be the one that gets queued
+ * next by serialising the queueing process. This mutex is also used
+ * to serialize with canceling a vb2 queue and with setting values such
+ * as controls in a request.
+ */
+ mutex_lock(&mdev->req_queue_mutex);
+
+ media_request_get(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_IDLE)
+ req->state = MEDIA_REQUEST_STATE_VALIDATING;
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (state != MEDIA_REQUEST_STATE_VALIDATING) {
+ dev_dbg(mdev->dev,
+ "request: unable to queue %s, request in state %s\n",
+ req->debug_str, media_request_state_str(state));
+ media_request_put(req);
+ mutex_unlock(&mdev->req_queue_mutex);
+ return -EBUSY;
+ }
+
+ ret = mdev->ops->req_validate(req);
+
+ /*
+ * If the req_validate was successful, then we mark the state as QUEUED
+ * and call req_queue. The reason we set the state first is that this
+ * allows req_queue to unbind or complete the queued objects in case
+ * they are immediately 'consumed'. State changes from QUEUED to another
+ * state can only happen if either the driver changes the state or if
+ * the user cancels the vb2 queue. The driver can only change the state
+ * after each object is queued through the req_queue op (and note that
+ * that op cannot fail), so setting the state to QUEUED up front is
+ * safe.
+ *
+ * The other reason for changing the state is if the vb2 queue is
+ * canceled, and that uses the req_queue_mutex which is still locked
+ * while req_queue is called, so that's safe as well.
+ */
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = ret ? MEDIA_REQUEST_STATE_IDLE
+ : MEDIA_REQUEST_STATE_QUEUED;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ if (!ret)
+ mdev->ops->req_queue(req);
+
+ mutex_unlock(&mdev->req_queue_mutex);
+
+ if (ret) {
+ dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
+ req->debug_str, ret);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+static long media_request_ioctl_reinit(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ dev_dbg(mdev->dev,
+ "request: %s not in idle or complete state, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ if (req->access_count) {
+ dev_dbg(mdev->dev,
+ "request: %s is being accessed, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ media_request_clean(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return 0;
+}
+
+static long media_request_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct media_request *req = filp->private_data;
+
+ switch (cmd) {
+ case MEDIA_REQUEST_IOC_QUEUE:
+ return media_request_ioctl_queue(req);
+ case MEDIA_REQUEST_IOC_REINIT:
+ return media_request_ioctl_reinit(req);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations request_fops = {
+ .owner = THIS_MODULE,
+ .poll = media_request_poll,
+ .unlocked_ioctl = media_request_ioctl,
+ .release = media_request_close,
+};
+
+struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd)
+{
+ struct file *filp;
+ struct media_request *req;
+
+ if (!mdev || !mdev->ops ||
+ !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return ERR_PTR(-EACCES);
+
+ filp = fget(request_fd);
+ if (!filp)
+ goto err_no_req_fd;
+
+ if (filp->f_op != &request_fops)
+ goto err_fput;
+ req = filp->private_data;
+ if (req->mdev != mdev)
+ goto err_fput;
+
+ /*
+ * Note: as long as someone has an open filehandle of the request,
+ * the request can never be released. The fget() above ensures that
+ * even if userspace closes the request filehandle, the release()
+ * fop won't be called, so the media_request_get() always succeeds
+ * and there is no race condition where the request was released
+ * before media_request_get() is called.
+ */
+ media_request_get(req);
+ fput(filp);
+
+ return req;
+
+err_fput:
+ fput(filp);
+
+err_no_req_fd:
+ dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(media_request_get_by_fd);
+
+int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+{
+ struct media_request *req;
+ struct file *filp;
+ int fd;
+ int ret;
+
+ /* Either both are NULL or both are non-NULL */
+ if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
+ return -ENOMEM;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
+ if (IS_ERR(filp)) {
+ ret = PTR_ERR(filp);
+ goto err_put_fd;
+ }
+
+ if (mdev->ops->req_alloc)
+ req = mdev->ops->req_alloc(mdev);
+ else
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ filp->private_data = req;
+ req->mdev = mdev;
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ req->num_incomplete_objects = 0;
+ kref_init(&req->kref);
+ INIT_LIST_HEAD(&req->objects);
+ spin_lock_init(&req->lock);
+ init_waitqueue_head(&req->poll_wait);
+ req->updating_count = 0;
+ req->access_count = 0;
+
+ *alloc_fd = fd;
+
+ snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
+ atomic_inc_return(&mdev->request_id), fd);
+ dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
+
+ fd_install(fd, filp);
+
+ return 0;
+
+err_fput:
+ fput(filp);
+
+err_put_fd:
+ put_unused_fd(fd);
+
+ return ret;
+}
+
+static void media_request_object_release(struct kref *kref)
+{
+ struct media_request_object *obj =
+ container_of(kref, struct media_request_object, kref);
+ struct media_request *req = obj->req;
+
+ if (WARN_ON(req))
+ media_request_object_unbind(obj);
+ obj->ops->release(obj);
+}
+
+struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv)
+{
+ struct media_request_object *obj;
+ struct media_request_object *found = NULL;
+ unsigned long flags;
+
+ if (WARN_ON(!ops || !priv))
+ return NULL;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list) {
+ if (obj->ops == ops && obj->priv == priv) {
+ media_request_object_get(obj);
+ found = obj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+ return found;
+}
+EXPORT_SYMBOL_GPL(media_request_object_find);
+
+void media_request_object_put(struct media_request_object *obj)
+{
+ kref_put(&obj->kref, media_request_object_release);
+}
+EXPORT_SYMBOL_GPL(media_request_object_put);
+
+void media_request_object_init(struct media_request_object *obj)
+{
+ obj->ops = NULL;
+ obj->req = NULL;
+ obj->priv = NULL;
+ obj->completed = false;
+ INIT_LIST_HEAD(&obj->list);
+ kref_init(&obj->kref);
+}
+EXPORT_SYMBOL_GPL(media_request_object_init);
+
+int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if (WARN_ON(!ops->release))
+ return -EACCES;
+
+ spin_lock_irqsave(&req->lock, flags);
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ goto unlock;
+
+ obj->req = req;
+ obj->ops = ops;
+ obj->priv = priv;
+
+ if (is_buffer)
+ list_add_tail(&obj->list, &req->objects);
+ else
+ list_add(&obj->list, &req->objects);
+ req->num_incomplete_objects++;
+ ret = 0;
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_request_object_bind);
+
+void media_request_object_unbind(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ if (WARN_ON(!req))
+ return;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_del(&obj->list);
+ obj->req = NULL;
+
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
+ goto unlock;
+
+ if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
+ goto unlock;
+
+ if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
+ if (!obj->completed)
+ req->num_incomplete_objects--;
+ goto unlock;
+ }
+
+ if (WARN_ON(!req->num_incomplete_objects))
+ goto unlock;
+
+ req->num_incomplete_objects--;
+ if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
+ !req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ completed = true;
+ wake_up_interruptible_all(&req->poll_wait);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (obj->ops->unbind)
+ obj->ops->unbind(obj);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_unbind);
+
+void media_request_object_complete(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (obj->completed)
+ goto unlock;
+ obj->completed = true;
+ if (WARN_ON(!req->num_incomplete_objects) ||
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ goto unlock;
+
+ if (!--req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ wake_up_interruptible_all(&req->poll_wait);
+ completed = true;
+ }
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_complete);
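
That concludes the new request core. To make the control flow above concrete, here is a hedged userspace sketch of the intended lifecycle: allocate a request on the media node, stage per-frame controls and a buffer against the request fd, queue it, wait for completion, then reinit or close. The ioctl, flag and field names (MEDIA_IOC_REQUEST_ALLOC, MEDIA_REQUEST_IOC_QUEUE, MEDIA_REQUEST_IOC_REINIT, V4L2_BUF_FLAG_REQUEST_FD, V4L2_CTRL_WHICH_REQUEST_VAL, request_fd) are taken from this series' uAPI additions; error handling is trimmed and the helper name is made up.

	#include <poll.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/media.h>
	#include <linux/videodev2.h>

	/* Queue one frame worth of controls + buffer through a request. */
	static int queue_one_request(int media_fd, int video_fd,
				     struct v4l2_ext_controls *ctrls,
				     struct v4l2_buffer *buf)
	{
		struct pollfd pfd;
		int req_fd;

		/* MEDIA_IOC_REQUEST_ALLOC returns a new request as a file descriptor. */
		if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
			return -1;

		/* Stage the per-frame controls in the request instead of applying them now. */
		ctrls->which = V4L2_CTRL_WHICH_REQUEST_VAL;
		ctrls->request_fd = req_fd;
		if (ioctl(video_fd, VIDIOC_S_EXT_CTRLS, ctrls) < 0)
			goto err_close;

		/* Bind the buffer to the same request. */
		buf->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		buf->request_fd = req_fd;
		if (ioctl(video_fd, VIDIOC_QBUF, buf) < 0)
			goto err_close;

		/* Queue the request; it completes once every bound object completes. */
		if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
			goto err_close;

		/* The request fd signals POLLPRI once the request completes. */
		pfd.fd = req_fd;
		pfd.events = POLLPRI;
		poll(&pfd, 1, -1);

		/* Recycle the request for the next frame and hand it back to the caller. */
		ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
		return req_fd;

	err_close:
		close(req_fd);
		return -1;
	}

Closing req_fd instead of reinitializing it drops the file's reference; the kref handling above then releases the request together with its bound objects.
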
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b2cfcbb0008e..d4906c04dc6e 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4210,7 +4210,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
/* register video4linux + input */
if (!bttv_tvcards[btv->c.type].no_video) {
v4l2_ctrl_add_handler(&btv->radio_ctrl_handler, hdl,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, false);
if (btv->radio_ctrl_handler.error) {
result = btv->radio_ctrl_handler.error;
goto fail2;
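
The one-line changes in this and the following driver hunks all track a single prototype change made further down in v4l2-ctrls.c: v4l2_ctrl_add_handler() gains a from_other_dev flag, recording whether the inherited controls belong to another device so that v4l2_ctrl_request_clone() can skip them. For reference, a sketch of the new declaration, matching the definition later in this diff:

	#include <linux/types.h>

	struct v4l2_ctrl;
	struct v4l2_ctrl_handler;

	/* The added bool marks controls inherited from another device
	 * (typically a subdev); request clones skip such refs. */
	int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
				  struct v4l2_ctrl_handler *add,
				  bool (*filter)(const struct v4l2_ctrl *ctrl),
				  bool from_other_dev);
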
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index 3083434bb636..a00b77d80ed9 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1527,7 +1527,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
dev->cxhdl.priv = dev;
dev->cxhdl.func = cx23885_api_func;
cx2341x_handler_set_50hz(&dev->cxhdl, tsport->height == 576);
- v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL);
+ v4l2_ctrl_add_handler(&dev->ctrl_handler, &dev->cxhdl.hdl, NULL, false);
/* Allocate and initialize V4L video device */
dev->v4l_device = cx23885_video_dev_alloc(tsport,
diff --git a/drivers/media/pci/cx88/cx88-blackbird.c b/drivers/media/pci/cx88/cx88-blackbird.c
index 199756547f03..6c0bb9fe4a31 100644
--- a/drivers/media/pci/cx88/cx88-blackbird.c
+++ b/drivers/media/pci/cx88/cx88-blackbird.c
@@ -1183,7 +1183,7 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
err = cx2341x_handler_init(&dev->cxhdl, 36);
if (err)
goto fail_core;
- v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL);
+ v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl, NULL, false);
/* blackbird stuff */
pr_info("cx23416 based mpeg encoder (blackbird reference design)\n");
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index df4e7a0686e0..e1549d352f70 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1378,7 +1378,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
if (vc->id == V4L2_CID_CHROMA_AGC)
core->chroma_agc = vc;
}
- v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL);
+ v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL, false);
/* load and configure helper modules */
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index 747a082229dc..9735bbb908e3 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -265,9 +265,9 @@ static int empress_init(struct saa7134_dev *dev)
"%s empress (%s)", dev->name,
saa7134_boards[dev->board].name);
v4l2_ctrl_handler_init(hdl, 21);
- v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter);
+ v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler, empress_ctrl_filter, false);
if (dev->empress_sd)
- v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(hdl, dev->empress_sd->ctrl_handler, NULL, true);
if (hdl->error) {
video_device_release(dev->empress_dev);
return hdl->error;
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 1a22ae7cbdd9..8f28741ebb35 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -2137,7 +2137,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
hdl = &dev->radio_ctrl_handler;
v4l2_ctrl_handler_init(hdl, 2);
v4l2_ctrl_add_handler(hdl, &dev->ctrl_handler,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, false);
if (hdl->error)
return hdl->error;
}
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index f56220e549bb..3e9fcf4f8a13 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1424,7 +1424,7 @@ static int fimc_link_setup(struct media_entity *entity,
return 0;
return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler,
- sensor->ctrl_handler, NULL);
+ sensor->ctrl_handler, NULL, true);
}
static const struct media_entity_operations fimc_sd_media_ops = {
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 5658f6a326f7..078d64114b24 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -940,7 +940,7 @@ isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
int ret;
mutex_lock(&video->queue_lock);
- ret = vb2_qbuf(&vfh->queue, b);
+ ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
mutex_unlock(&video->queue_lock);
return ret;
@@ -1028,7 +1028,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
ctrls.count = 1;
ctrls.controls = &ctrl;
- ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
+ ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, NULL, &ctrls);
if (ret < 0) {
dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
pipe->external->name);
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index a3f135364474..f476b2f1eb35 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -475,7 +475,7 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
return ret;
ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
- NULL);
+ NULL, true);
if (ret < 0) {
v4l2_ctrl_handler_free(&vin->ctrl_handler);
return ret;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 8483dc36715d..c417ff8f6fe5 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1164,7 +1164,7 @@ static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
}
ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
- sdr->ep.subdev->ctrl_handler, NULL);
+ sdr->ep.subdev->ctrl_handler, NULL, true);
if (ret) {
rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
goto error;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 23b008d1a47b..c3fc94ef251e 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -943,7 +943,7 @@ static int s3c_camif_qbuf(struct file *file, void *priv,
if (vp->owner && vp->owner != priv)
return -EBUSY;
- return vb2_qbuf(&vp->vb_queue, buf);
+ return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
}
static int s3c_camif_dqbuf(struct file *file, void *priv,
@@ -981,7 +981,7 @@ static int s3c_camif_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct camif_vp *vp = video_drvdata(file);
- return vb2_prepare_buf(&vp->vb_queue, b);
+ return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b);
}
static int s3c_camif_g_selection(struct file *file, void *priv,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 670ca869babb..ece59ce1b149 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -632,9 +632,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EIO;
}
if (buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- return vb2_qbuf(&ctx->vq_src, buf);
+ return vb2_qbuf(&ctx->vq_src, NULL, buf);
else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return vb2_qbuf(&ctx->vq_dst, buf);
+ return vb2_qbuf(&ctx->vq_dst, NULL, buf);
return -EINVAL;
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 7037d48bdc2c..8fcf627dedfb 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -1621,9 +1621,9 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
mfc_err("Call on QBUF after EOS command\n");
return -EIO;
}
- return vb2_qbuf(&ctx->vq_src, buf);
+ return vb2_qbuf(&ctx->vq_src, NULL, buf);
} else if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- return vb2_qbuf(&ctx->vq_dst, buf);
+ return vb2_qbuf(&ctx->vq_dst, NULL, buf);
}
return -EINVAL;
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 0a70fb67c401..21034339cdcb 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -394,7 +394,7 @@ static int soc_camera_qbuf(struct file *file, void *priv,
if (icd->streamer != file)
return -EBUSY;
- return vb2_qbuf(&icd->vb2_vidq, p);
+ return vb2_qbuf(&icd->vb2_vidq, NULL, p);
}
static int soc_camera_dqbuf(struct file *file, void *priv,
@@ -430,7 +430,7 @@ static int soc_camera_prepare_buf(struct file *file, void *priv,
{
struct soc_camera_device *icd = file->private_data;
- return vb2_prepare_buf(&icd->vb2_vidq, b);
+ return vb2_prepare_buf(&icd->vb2_vidq, NULL, b);
}
static int soc_camera_expbuf(struct file *file, void *priv,
@@ -1181,7 +1181,8 @@ static int soc_camera_probe_finish(struct soc_camera_device *icd)
v4l2_subdev_call(sd, video, g_tvnorms, &icd->vdev->tvnorms);
- ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler, NULL);
+ ret = v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index 60c522ee2e03..af150a0395df 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -3,7 +3,8 @@
*
* This is a virtual device driver for testing mem-to-mem videobuf framework.
* It simulates a device that uses memory buffers for both source and
- * destination, processes the data and issues an "irq" (simulated by a timer).
+ * destination, processes the data and issues an "irq" (simulated by a delayed
+ * workqueue).
* The device is capable of multi-instance, multi-buffer-per-transaction
* operation (via the mem2mem framework).
*
@@ -19,7 +20,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -148,7 +148,7 @@ struct vim2m_dev {
struct mutex dev_mutex;
spinlock_t irqlock;
- struct timer_list timer;
+ struct delayed_work work_run;
struct v4l2_m2m_dev *m2m_dev;
};
@@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
return 0;
}
-static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
-{
- dprintk(dev, "Scheduling a simulated irq\n");
- mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
-}
-
/*
* mem2mem callbacks
*/
@@ -385,15 +379,24 @@ static void device_run(void *priv)
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ /* Apply request controls if any */
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+
device_process(ctx, src_buf, dst_buf);
- /* Run a timer, which simulates a hardware irq */
- schedule_irq(dev, ctx->transtime);
+ /* Complete request controls if any */
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+
+ /* Run delayed work, which simulates a hardware irq */
+ schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
}
-static void device_isr(struct timer_list *t)
+static void device_work(struct work_struct *w)
{
- struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
+ struct vim2m_dev *vim2m_dev =
+ container_of(w, struct vim2m_dev, work_run.work);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
@@ -805,6 +808,7 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
+ flush_scheduled_work();
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
@@ -812,12 +816,21 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (vbuf == NULL)
return;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
spin_lock_irqsave(&ctx->dev->irqlock, flags);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
}
}
+static void vim2m_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
static const struct vb2_ops vim2m_qops = {
.queue_setup = vim2m_queue_setup,
.buf_prepare = vim2m_buf_prepare,
@@ -826,6 +839,7 @@ static const struct vb2_ops vim2m_qops = {
.stop_streaming = vim2m_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
+ .buf_request_complete = vim2m_buf_request_complete,
};
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
@@ -841,6 +855,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
src_vq->mem_ops = &vb2_vmalloc_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->supports_requests = true;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -992,6 +1007,11 @@ static const struct v4l2_m2m_ops m2m_ops = {
.job_abort = job_abort,
};
+static const struct media_device_ops m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = vb2_m2m_request_queue,
+};
+
static int vim2m_probe(struct platform_device *pdev)
{
struct vim2m_dev *dev;
@@ -1015,6 +1035,7 @@ static int vim2m_probe(struct platform_device *pdev)
vfd = &dev->vfd;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
+ INIT_DELAYED_WORK(&dev->work_run, device_work);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
@@ -1026,7 +1047,6 @@ static int vim2m_probe(struct platform_device *pdev)
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
@@ -1040,6 +1060,7 @@ static int vim2m_probe(struct platform_device *pdev)
dev->mdev.dev = &pdev->dev;
strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model));
media_device_init(&dev->mdev);
+ dev->mdev.ops = &m2m_media_ops;
dev->v4l2_dev.mdev = &dev->mdev;
ret = v4l2_m2m_register_media_controller(dev->m2m_dev,
@@ -1083,7 +1104,6 @@ static int vim2m_remove(struct platform_device *pdev)
media_device_cleanup(&dev->mdev);
#endif
v4l2_m2m_release(dev->m2m_dev);
- del_timer_sync(&dev->timer);
video_unregister_device(&dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
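
Taken together, the vim2m hunks above are a small recipe for request support in a mem2mem driver: set supports_requests on the source queue, point mdev.ops at the vb2 request helpers, bracket the actual processing with v4l2_ctrl_request_setup()/v4l2_ctrl_request_complete(), and complete requests for buffers that are thrown away unprocessed. A condensed sketch under those assumptions (the my_* names are placeholders, not part of this series):

	#include <media/media-device.h>
	#include <media/v4l2-ctrls.h>
	#include <media/v4l2-fh.h>
	#include <media/v4l2-mem2mem.h>
	#include <media/videobuf2-v4l2.h>

	struct my_ctx {
		struct v4l2_fh fh;
		struct v4l2_ctrl_handler hdl;
	};

	/* Placeholder for the actual frame processing. */
	static void my_process(struct my_ctx *ctx, struct vb2_v4l2_buffer *src,
			       struct vb2_v4l2_buffer *dst)
	{
	}

	/* Media device ops: validation and queueing come from vb2 helpers;
	 * assign this to dev->mdev.ops in probe(), and also set
	 * src_vq->supports_requests = true in queue_init(). */
	static const struct media_device_ops my_m2m_media_ops = {
		.req_validate = vb2_request_validate,
		.req_queue    = vb2_m2m_request_queue,
	};

	/* device_run(): apply the request's controls before processing and
	 * complete the control object afterwards. */
	static void my_device_run(void *priv)
	{
		struct my_ctx *ctx = priv;
		struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		struct vb2_v4l2_buffer *dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

		v4l2_ctrl_request_setup(src->vb2_buf.req_obj.req, &ctx->hdl);
		my_process(ctx, src, dst);
		v4l2_ctrl_request_complete(src->vb2_buf.req_obj.req, &ctx->hdl);
	}

	/* Buffers returned without ever running still carry a request that
	 * must be completed; vb2 calls this new hook for them. */
	static void my_buf_request_complete(struct vb2_buffer *vb)
	{
		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
	}
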
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 06961e7d8036..626e2b24a403 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -627,6 +627,13 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
kfree(dev);
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+static const struct media_device_ops vivid_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = vb2_request_queue,
+};
+#endif
+
static int vivid_create_instance(struct platform_device *pdev, int inst)
{
static const struct v4l2_dv_timings def_dv_timings =
@@ -657,6 +664,16 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
dev->inst = inst;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ /* Initialize media device */
+ strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
+ dev->mdev.dev = &pdev->dev;
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &vivid_media_ops;
+#endif
+
/* register v4l2_device */
snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name),
"%s-%03d", VIVID_MODULE_NAME, inst);
@@ -1060,6 +1077,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1080,6 +1098,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1100,6 +1119,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1120,6 +1140,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 2;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1139,6 +1160,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
q->min_buffers_needed = 8;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
+ q->supports_requests = true;
ret = vb2_queue_init(q);
if (ret)
@@ -1174,6 +1196,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vid_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
#ifdef CONFIG_VIDEO_VIVID_CEC
if (in_type_counter[HDMI]) {
struct cec_adapter *adap;
@@ -1226,6 +1255,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vid_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vid_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
#ifdef CONFIG_VIDEO_VIVID_CEC
for (i = 0; i < dev->num_outputs; i++) {
struct cec_adapter *adap;
@@ -1275,6 +1311,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->tvnorms = tvnorms_cap;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vbi_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1300,6 +1343,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->tvnorms = tvnorms_out;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->vbi_out_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->vbi_out_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_VBI, vbi_out_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1323,6 +1373,13 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->lock = &dev->mutex;
video_set_drvdata(vfd, dev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ dev->sdr_cap_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &dev->sdr_cap_pad);
+ if (ret)
+ goto unreg_dev;
+#endif
+
ret = video_register_device(vfd, VFL_TYPE_SDR, sdr_cap_nr[inst]);
if (ret < 0)
goto unreg_dev;
@@ -1369,12 +1426,25 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
video_device_node_name(vfd));
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+ /* Register the media device */
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ dev_err(dev->mdev.dev,
+ "media device register failed (err=%d)\n", ret);
+ goto unreg_dev;
+ }
+#endif
+
/* Now that everything is fine, let's add it to device list */
vivid_devs[inst] = dev;
return 0;
unreg_dev:
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+#endif
video_unregister_device(&dev->radio_tx_dev);
video_unregister_device(&dev->radio_rx_dev);
video_unregister_device(&dev->sdr_cap_dev);
@@ -1445,6 +1515,10 @@ static int vivid_remove(struct platform_device *pdev)
if (!dev)
continue;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ media_device_unregister(&dev->mdev);
+#endif
+
if (dev->has_vid_cap) {
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
video_device_node_name(&dev->vid_cap_dev));
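
The vivid hunks above show the same wiring for a non-m2m driver, wrapped in CONFIG_MEDIA_CONTROLLER: initialize a media_device, point its ops at vb2_request_validate()/vb2_request_queue(), mark each vb2 queue with supports_requests, give every video node a pad, and register the media device only after the video nodes. A rough ordering sketch (struct my_dev and my_register() are illustrative, not from this series; the #ifdefs are dropped for brevity):

	#include <linux/platform_device.h>
	#include <linux/string.h>
	#include <media/media-device.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-device.h>
	#include <media/videobuf2-v4l2.h>

	struct my_dev {
		struct v4l2_device v4l2_dev;
		struct media_device mdev;
		struct media_pad pad;
		struct video_device vdev;
		struct vb2_queue queue;
	};

	static const struct media_device_ops my_media_ops = {
		.req_validate = vb2_request_validate,
		.req_queue    = vb2_request_queue,
	};

	static int my_register(struct my_dev *dev, struct platform_device *pdev)
	{
		int ret;

		/* Initialize the media device before registering any video node. */
		dev->v4l2_dev.mdev = &dev->mdev;
		dev->mdev.dev = &pdev->dev;
		strscpy(dev->mdev.model, "my-driver", sizeof(dev->mdev.model));
		media_device_init(&dev->mdev);
		dev->mdev.ops = &my_media_ops;

		/* Every vb2 queue that may carry requests must opt in. */
		dev->queue.supports_requests = true;

		/* Video nodes need at least one pad to show up in the graph. */
		dev->pad.flags = MEDIA_PAD_FL_SINK;
		ret = media_entity_pads_init(&dev->vdev.entity, 1, &dev->pad);
		if (ret)
			return ret;

		ret = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
		if (ret)
			return ret;

		/* Register the media device last, once its entities exist. */
		return media_device_register(&dev->mdev);
	}
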
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index cd4c8230563c..1891254c8f0b 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -136,6 +136,14 @@ struct vivid_cec_work {
struct vivid_dev {
unsigned inst;
struct v4l2_device v4l2_dev;
+#ifdef CONFIG_MEDIA_CONTROLLER
+ struct media_device mdev;
+ struct media_pad vid_cap_pad;
+ struct media_pad vid_out_pad;
+ struct media_pad vbi_cap_pad;
+ struct media_pad vbi_out_pad;
+ struct media_pad sdr_cap_pad;
+#endif
struct v4l2_ctrl_handler ctrl_hdl_user_gen;
struct v4l2_ctrl_handler ctrl_hdl_user_vid;
struct v4l2_ctrl_handler ctrl_hdl_user_aud;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 999aa101b150..bfffeda12f14 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -1662,59 +1662,59 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_auto_cluster(2, &dev->autogain, 0, true);
if (dev->has_vid_cap) {
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_vid, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_user_aud, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_sdtv_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_loop_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_cap, hdl_fb, NULL, false);
if (hdl_vid_cap->error)
return hdl_vid_cap->error;
dev->vid_cap_dev.ctrl_handler = hdl_vid_cap;
}
if (dev->has_vid_out) {
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_user_aud, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vid_out, hdl_fb, NULL, false);
if (hdl_vid_out->error)
return hdl_vid_out->error;
dev->vid_out_dev.ctrl_handler = hdl_vid_out;
}
if (dev->has_vbi_cap) {
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_streaming, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_sdtv_cap, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_cap, hdl_loop_cap, NULL, false);
if (hdl_vbi_cap->error)
return hdl_vbi_cap->error;
dev->vbi_cap_dev.ctrl_handler = hdl_vbi_cap;
}
if (dev->has_vbi_out) {
- v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_vbi_out, hdl_streaming, NULL, false);
if (hdl_vbi_out->error)
return hdl_vbi_out->error;
dev->vbi_out_dev.ctrl_handler = hdl_vbi_out;
}
if (dev->has_radio_rx) {
- v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_radio_rx, hdl_user_aud, NULL, false);
if (hdl_radio_rx->error)
return hdl_radio_rx->error;
dev->radio_rx_dev.ctrl_handler = hdl_radio_rx;
}
if (dev->has_radio_tx) {
- v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_radio_tx, hdl_user_aud, NULL, false);
if (hdl_radio_tx->error)
return hdl_radio_tx->error;
dev->radio_tx_dev.ctrl_handler = hdl_radio_tx;
}
if (dev->has_sdr_cap) {
- v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL);
- v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_user_gen, NULL, false);
+ v4l2_ctrl_add_handler(hdl_sdr_cap, hdl_streaming, NULL, false);
if (hdl_sdr_cap->error)
return hdl_sdr_cap->error;
dev->sdr_cap_dev.ctrl_handler = hdl_sdr_cap;
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index f06003bb8e42..eebfff2126be 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -703,6 +703,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
goto update_mv;
if (vid_cap_buf) {
+ v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
/* Fill buffer */
vivid_fillbuff(dev, vid_cap_buf);
dprintk(dev, 1, "filled buffer %d\n",
@@ -713,6 +715,8 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
dev->fb_cap.fmt.pixelformat == dev->fmt_cap->fourcc)
vivid_overlay(dev, vid_cap_buf);
+ v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vid_cap buffer %d done\n",
@@ -720,10 +724,14 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
}
if (vbi_cap_buf) {
+ v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
if (dev->stream_sliced_vbi_cap)
vivid_sliced_vbi_cap_process(dev, vbi_cap_buf);
else
vivid_raw_vbi_cap_process(dev, vbi_cap_buf);
+ v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ?
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vbi_cap %d done\n",
@@ -891,6 +899,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_cap buffer %d done\n",
buf->vb.vb2_buf.index);
@@ -904,6 +914,8 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_cap buffer %d done\n",
buf->vb.vb2_buf.index);
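
In the capture thread, the per-buffer pattern added above is: apply the request's controls right before filling the buffer, complete the request's control handler object right before vb2_buffer_done(), and do the same for buffers flushed at stop time. Vivid calls both helpers unconditionally; buffers queued without a request simply have req_obj.req == NULL, which the helpers are expected to tolerate. A trimmed sketch with placeholder names:

	#include <linux/list.h>
	#include <media/media-request.h>
	#include <media/v4l2-ctrls.h>
	#include <media/videobuf2-v4l2.h>

	struct my_buffer {
		struct vb2_v4l2_buffer vb;
		struct list_head list;
	};

	struct my_cap_dev {
		struct v4l2_ctrl_handler ctrl_hdl;
	};

	/* One producer tick for a single dequeued buffer. */
	static void my_cap_tick(struct my_cap_dev *dev, struct my_buffer *buf)
	{
		struct media_request *req = buf->vb.vb2_buf.req_obj.req;

		/* Apply the controls staged in this buffer's request (req may be NULL). */
		v4l2_ctrl_request_setup(req, &dev->ctrl_hdl);

		/* ...fill the buffer here, as vivid_fillbuff() does above... */

		/* Complete the control object before handing the buffer back. */
		v4l2_ctrl_request_complete(req, &dev->ctrl_hdl);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}
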
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 9981e7548019..5a14810eeb69 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -75,6 +75,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
return;
if (vid_out_buf) {
+ v4l2_ctrl_request_setup(vid_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
+ v4l2_ctrl_request_complete(vid_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vid_out_buf->vb.sequence = dev->vid_out_seq_count;
if (dev->field_out == V4L2_FIELD_ALTERNATE) {
/*
@@ -92,6 +96,10 @@ static void vivid_thread_vid_out_tick(struct vivid_dev *dev)
}
if (vbi_out_buf) {
+ v4l2_ctrl_request_setup(vbi_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
+ v4l2_ctrl_request_complete(vbi_out_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
if (dev->stream_sliced_vbi_out)
vivid_sliced_vbi_out_process(dev, vbi_out_buf);
@@ -262,6 +270,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vid_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vid_out buffer %d done\n",
buf->vb.vb2_buf.index);
@@ -275,6 +285,8 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
buf = list_entry(dev->vbi_out_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dprintk(dev, 2, "vbi_out buffer %d done\n",
buf->vb.vb2_buf.index);
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index 200b789a3f21..dcdc80e272c2 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -102,6 +102,10 @@ static void vivid_thread_sdr_cap_tick(struct vivid_dev *dev)
if (sdr_cap_buf) {
sdr_cap_buf->vb.sequence = dev->sdr_cap_seq_count;
+ v4l2_ctrl_request_setup(sdr_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
+ v4l2_ctrl_request_complete(sdr_cap_buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vivid_sdr_cap_process(dev, sdr_cap_buf);
sdr_cap_buf->vb.vb2_buf.timestamp =
ktime_get_ns() + dev->time_wrap_offset;
@@ -272,6 +276,8 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -293,6 +299,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
buf = list_entry(dev->sdr_cap_active.next,
struct vivid_buffer, list);
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
@@ -303,12 +311,20 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
mutex_lock(&dev->mutex);
}
+static void sdr_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_sdr_cap);
+}
+
const struct vb2_ops vivid_sdr_cap_qops = {
.queue_setup = sdr_cap_queue_setup,
.buf_prepare = sdr_cap_buf_prepare,
.buf_queue = sdr_cap_buf_queue,
.start_streaming = sdr_cap_start_streaming,
.stop_streaming = sdr_cap_stop_streaming,
+ .buf_request_complete = sdr_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 92a852955173..903cebeb5ce5 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -204,6 +204,8 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -220,12 +222,20 @@ static void vbi_cap_stop_streaming(struct vb2_queue *vq)
vivid_stop_generating_vid_cap(dev, &dev->vbi_cap_streaming);
}
+static void vbi_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_cap);
+}
+
const struct vb2_ops vivid_vbi_cap_qops = {
.queue_setup = vbi_cap_queue_setup,
.buf_prepare = vbi_cap_buf_prepare,
.buf_queue = vbi_cap_buf_queue,
.start_streaming = vbi_cap_start_streaming,
.stop_streaming = vbi_cap_stop_streaming,
+ .buf_request_complete = vbi_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 69486c130a7e..9357c07e30d6 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -96,6 +96,8 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -115,12 +117,20 @@ static void vbi_out_stop_streaming(struct vb2_queue *vq)
dev->vbi_out_have_cc[1] = false;
}
+static void vbi_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vbi_out);
+}
+
const struct vb2_ops vivid_vbi_out_qops = {
.queue_setup = vbi_out_queue_setup,
.buf_prepare = vbi_out_buf_prepare,
.buf_queue = vbi_out_buf_queue,
.start_streaming = vbi_out_start_streaming,
.stop_streaming = vbi_out_stop_streaming,
+ .buf_request_complete = vbi_out_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 6cf910a60ecf..9c8e8be81ce3 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -243,6 +243,8 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -260,6 +262,13 @@ static void vid_cap_stop_streaming(struct vb2_queue *vq)
dev->can_loop_video = false;
}
+static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
+}
+
const struct vb2_ops vivid_vid_cap_qops = {
.queue_setup = vid_cap_queue_setup,
.buf_prepare = vid_cap_buf_prepare,
@@ -267,6 +276,7 @@ const struct vb2_ops vivid_vid_cap_qops = {
.buf_queue = vid_cap_buf_queue,
.start_streaming = vid_cap_start_streaming,
.stop_streaming = vid_cap_stop_streaming,
+ .buf_request_complete = vid_cap_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 50248e2176a0..aaf13f03d5d4 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -162,6 +162,8 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
list_del(&buf->list);
+ v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
+ &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -179,12 +181,20 @@ static void vid_out_stop_streaming(struct vb2_queue *vq)
dev->can_loop_video = false;
}
+static void vid_out_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out);
+}
+
const struct vb2_ops vivid_vid_out_qops = {
.queue_setup = vid_out_queue_setup,
.buf_prepare = vid_out_buf_prepare,
.buf_queue = vid_out_buf_queue,
.start_streaming = vid_out_start_streaming,
.stop_streaming = vid_out_stop_streaming,
+ .buf_request_complete = vid_out_buf_request_complete,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index aa7f3c307b22..3f401fbd0ecc 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -949,7 +949,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
buf->length = cam->frame_size;
buf->reserved2 = 0;
- buf->reserved = 0;
+ buf->request_fd = 0;
memset(&buf->timecode, 0, sizeof(buf->timecode));
DBG("DQBUF #%d status:%d seq:%d length:%d\n", buf->index,
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index f700ec35b7f3..2641e23d946b 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1992,7 +1992,7 @@ int cx231xx_417_register(struct cx231xx *dev)
dev->mpeg_ctrl_handler.ops = &cx231xx_ops;
if (dev->sd_cx25840)
v4l2_ctrl_add_handler(&dev->mpeg_ctrl_handler.hdl,
- dev->sd_cx25840->ctrl_handler, NULL);
+ dev->sd_cx25840->ctrl_handler, NULL, false);
if (dev->mpeg_ctrl_handler.hdl.error) {
err = dev->mpeg_ctrl_handler.hdl.error;
dprintk(3, "%s: can't add cx25840 controls\n", dev->name);
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index f2f034c5cd62..c990f70c0ea6 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2204,10 +2204,10 @@ int cx231xx_register_analog_devices(struct cx231xx *dev)
if (dev->sd_cx25840) {
v4l2_ctrl_add_handler(&dev->ctrl_handler,
- dev->sd_cx25840->ctrl_handler, NULL);
+ dev->sd_cx25840->ctrl_handler, NULL, true);
v4l2_ctrl_add_handler(&dev->radio_ctrl_handler,
dev->sd_cx25840->ctrl_handler,
- v4l2_ctrl_radio_filter);
+ v4l2_ctrl_radio_filter, true);
}
if (dev->ctrl_handler.error)
diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
index 0fc4076c6d16..10b5b95bee34 100644
--- a/drivers/media/usb/msi2500/msi2500.c
+++ b/drivers/media/usb/msi2500/msi2500.c
@@ -1278,7 +1278,7 @@ static int msi2500_probe(struct usb_interface *intf,
}
/* currently all controls are from subdev */
- v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL);
+ v4l2_ctrl_add_handler(&dev->hdl, sd->ctrl_handler, NULL, true);
dev->v4l2_dev.ctrl_handler = &dev->hdl;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 6c992f197255..ee7b5318b351 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1627,7 +1627,7 @@ int tm6000_v4l2_register(struct tm6000_core *dev)
v4l2_ctrl_new_std(&dev->ctrl_handler, &tm6000_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
v4l2_ctrl_add_handler(&dev->ctrl_handler,
- &dev->radio_ctrl_handler, NULL);
+ &dev->radio_ctrl_handler, NULL, false);
if (dev->radio_ctrl_handler.error)
ret = dev->radio_ctrl_handler.error;
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index fecccb5e7628..8964e16f2b22 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -300,12 +300,13 @@ int uvc_create_buffers(struct uvc_video_queue *queue,
return ret;
}
-int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
+int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct media_device *mdev, struct v4l2_buffer *buf)
{
int ret;
mutex_lock(&queue->mutex);
- ret = vb2_qbuf(&queue->queue, buf);
+ ret = vb2_qbuf(&queue->queue, mdev, buf);
mutex_unlock(&queue->mutex);
return ret;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index b26182ce7462..84be596d3269 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -751,7 +751,8 @@ static int uvc_ioctl_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
if (!uvc_has_privileges(handle))
return -EBUSY;
- return uvc_queue_buffer(&stream->queue, buf);
+ return uvc_queue_buffer(&stream->queue,
+ stream->vdev.v4l2_dev->mdev, buf);
}
static int uvc_ioctl_expbuf(struct file *file, void *fh,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 591eae3d0b0d..c0cbd833d0a4 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -700,6 +700,7 @@ int uvc_query_buffer(struct uvc_video_queue *queue,
int uvc_create_buffers(struct uvc_video_queue *queue,
struct v4l2_create_buffers *v4l2_cb);
int uvc_queue_buffer(struct uvc_video_queue *queue,
+ struct media_device *mdev,
struct v4l2_buffer *v4l2_buf);
int uvc_export_buffer(struct uvc_video_queue *queue,
struct v4l2_exportbuffer *exp);
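
As with the earlier call-site updates, the uvc changes follow from vb2_qbuf() and vb2_prepare_buf() gaining a media_device argument, which the vb2 core uses to look up the buffer's request_fd via media_request_get_by_fd(); drivers without request support pass NULL, as s5p-mfc and soc_camera do above. The new prototypes look roughly like this:

	struct media_device;
	struct v4l2_buffer;
	struct vb2_queue;

	/* The mdev pointer is only needed when the queued buffer carries
	 * V4L2_BUF_FLAG_REQUEST_FD and a request fd has to be resolved. */
	int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
			    struct v4l2_buffer *b);
	int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
		     struct v4l2_buffer *b);
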
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 6481212fda77..f4325329fbd6 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -244,6 +244,7 @@ struct v4l2_format32 {
* return: number of created buffers
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
+ * @capabilities: capabilities of this buffer type.
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -251,7 +252,8 @@ struct v4l2_create_buffers32 {
__u32 count;
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
- __u32 reserved[8];
+ __u32 capabilities;
+ __u32 reserved[7];
};
static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size)
@@ -411,6 +413,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64,
if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
copy_in_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
+ assign_in_user(&p32->capabilities, &p64->capabilities) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return __put_v4l2_format32(&p64->format, &p32->format);
@@ -482,7 +485,7 @@ struct v4l2_buffer32 {
} m;
__u32 length;
__u32 reserved2;
- __u32 reserved;
+ __s32 request_fd;
};
static int get_v4l2_plane32(struct v4l2_plane __user *p64,
@@ -581,6 +584,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
{
u32 type;
u32 length;
+ s32 request_fd;
enum v4l2_memory memory;
struct v4l2_plane32 __user *uplane32;
struct v4l2_plane __user *uplane;
@@ -595,7 +599,9 @@ static int get_v4l2_buffer32(struct v4l2_buffer __user *p64,
get_user(memory, &p32->memory) ||
put_user(memory, &p64->memory) ||
get_user(length, &p32->length) ||
- put_user(length, &p64->length))
+ put_user(length, &p64->length) ||
+ get_user(request_fd, &p32->request_fd) ||
+ put_user(request_fd, &p64->request_fd))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -699,7 +705,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer __user *p64,
copy_in_user(&p32->timecode, &p64->timecode, sizeof(p64->timecode)) ||
assign_in_user(&p32->sequence, &p64->sequence) ||
assign_in_user(&p32->reserved2, &p64->reserved2) ||
- assign_in_user(&p32->reserved, &p64->reserved) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
get_user(length, &p64->length) ||
put_user(length, &p32->length))
return -EFAULT;
@@ -834,7 +840,8 @@ struct v4l2_ext_controls32 {
__u32 which;
__u32 count;
__u32 error_idx;
- __u32 reserved[2];
+ __s32 request_fd;
+ __u32 reserved[1];
compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
};
@@ -909,6 +916,7 @@ static int get_v4l2_ext_controls32(struct file *file,
get_user(count, &p32->count) ||
put_user(count, &p64->count) ||
assign_in_user(&p64->error_idx, &p32->error_idx) ||
+ assign_in_user(&p64->request_fd, &p32->request_fd) ||
copy_in_user(p64->reserved, p32->reserved, sizeof(p64->reserved)))
return -EFAULT;
@@ -974,6 +982,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(count, &p64->count) ||
put_user(count, &p32->count) ||
assign_in_user(&p32->error_idx, &p64->error_idx) ||
+ assign_in_user(&p32->request_fd, &p64->request_fd) ||
copy_in_user(p32->reserved, p64->reserved, sizeof(p32->reserved)) ||
get_user(kcontrols, &p64->controls))
return -EFAULT;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 4c0ecf29d278..6e37950292cd 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -37,8 +37,8 @@
struct v4l2_ctrl_helper {
/* Pointer to the control reference of the master control */
struct v4l2_ctrl_ref *mref;
- /* The control corresponding to the v4l2_ext_control ID field. */
- struct v4l2_ctrl *ctrl;
+ /* The control ref corresponding to the v4l2_ext_control ID field. */
+ struct v4l2_ctrl_ref *ref;
/* v4l2_ext_control index of the next control belonging to the
same cluster, or 0 if there isn't any. */
u32 next;
@@ -844,6 +844,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE: return "Vertical MV Search Range";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME: return "Force Key Frame";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS: return "MPEG-2 Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION: return "MPEG-2 Quantization Matrices";
/* VPX controls */
case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
@@ -1292,6 +1294,12 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_RDS_TX_ALT_FREQS:
*type = V4L2_CTRL_TYPE_U32;
break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS:
+ *type = V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:
+ *type = V4L2_CTRL_TYPE_MPEG2_QUANTIZATION;
+ break;
default:
*type = V4L2_CTRL_TYPE_INTEGER;
break;
@@ -1550,6 +1558,7 @@ static void std_log(const struct v4l2_ctrl *ctrl)
static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
union v4l2_ctrl_ptr ptr)
{
+ struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
size_t len;
u64 offset;
s64 val;
@@ -1612,6 +1621,54 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
return -ERANGE;
return 0;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ p_mpeg2_slice_params = ptr.p;
+
+ switch (p_mpeg2_slice_params->sequence.chroma_format) {
+ case 1: /* 4:2:0 */
+ case 2: /* 4:2:2 */
+ case 3: /* 4:4:4 */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
+ case 0: /* 8 bits */
+ case 1: /* 9 bits */
+ case 11: /* 11 bits */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_structure) {
+ case 1: /* interlaced top field */
+ case 2: /* interlaced bottom field */
+ case 3: /* progressive */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (p_mpeg2_slice_params->picture.picture_coding_type) {
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_I:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_P:
+ case V4L2_MPEG2_PICTURE_CODING_TYPE_B:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME ||
+ p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
+ return -EINVAL;
+
+ return 0;
+
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ return 0;
+
default:
return -EINVAL;
}
@@ -1668,6 +1725,13 @@ static int new_to_user(struct v4l2_ext_control *c,
return ptr_to_user(c, ctrl, ctrl->p_new);
}
+/* Helper function: copy the request value back to the caller */
+static int req_to_user(struct v4l2_ext_control *c,
+ struct v4l2_ctrl_ref *ref)
+{
+ return ptr_to_user(c, ref->ctrl, ref->p_req);
+}
+
/* Helper function: copy the initial control value back to the caller */
static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
{
@@ -1787,6 +1851,26 @@ static void cur_to_new(struct v4l2_ctrl *ctrl)
ptr_to_ptr(ctrl, ctrl->p_cur, ctrl->p_new);
}
+/* Copy the new value to the request value */
+static void new_to_req(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
+ ref->req = ref;
+}
+
+/* Copy the request value to the new value */
+static void req_to_new(struct v4l2_ctrl_ref *ref)
+{
+ if (!ref)
+ return;
+ if (ref->req)
+ ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
+ else
+ ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
+}
+
/* Return non-zero if one or more of the controls in the cluster has a new
value that differs from the current value. */
static int cluster_changed(struct v4l2_ctrl *master)
@@ -1896,11 +1980,15 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
+ INIT_LIST_HEAD(&hdl->requests);
+ INIT_LIST_HEAD(&hdl->requests_queued);
+ hdl->request_is_queued = false;
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
sizeof(hdl->buckets[0]),
GFP_KERNEL | __GFP_ZERO);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
+ media_request_object_init(&hdl->req_obj);
return hdl->error;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_init_class);
@@ -1915,6 +2003,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
if (hdl == NULL || hdl->buckets == NULL)
return;
+ if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
+ struct v4l2_ctrl_handler *req, *next_req;
+
+ list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
+ media_request_object_unbind(&req->req_obj);
+ media_request_object_put(&req->req_obj);
+ }
+ }
mutex_lock(hdl->lock);
/* Free all nodes */
list_for_each_entry_safe(ref, next_ref, &hdl->ctrl_refs, node) {
@@ -2016,13 +2112,19 @@ EXPORT_SYMBOL(v4l2_ctrl_find);
/* Allocate a new v4l2_ctrl_ref and hook it into the handler. */
static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
- struct v4l2_ctrl *ctrl)
+ struct v4l2_ctrl *ctrl,
+ struct v4l2_ctrl_ref **ctrl_ref,
+ bool from_other_dev, bool allocate_req)
{
struct v4l2_ctrl_ref *ref;
struct v4l2_ctrl_ref *new_ref;
u32 id = ctrl->id;
u32 class_ctrl = V4L2_CTRL_ID2WHICH(id) | 1;
int bucket = id % hdl->nr_of_buckets; /* which bucket to use */
+ unsigned int size_extra_req = 0;
+
+ if (ctrl_ref)
+ *ctrl_ref = NULL;
/*
* Automatically add the control class if it is not yet present and
@@ -2036,10 +2138,16 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
if (hdl->error)
return hdl->error;
- new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);
+ if (allocate_req)
+ size_extra_req = ctrl->elems * ctrl->elem_size;
+ new_ref = kzalloc(sizeof(*new_ref) + size_extra_req, GFP_KERNEL);
if (!new_ref)
return handler_set_err(hdl, -ENOMEM);
new_ref->ctrl = ctrl;
+ new_ref->from_other_dev = from_other_dev;
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
if (ctrl->handler == hdl) {
/* By default each control starts in a cluster of its own.
new_ref->ctrl is basically a cluster array with one
@@ -2079,6 +2187,8 @@ insert_in_hash:
/* Insert the control node in the hash */
new_ref->next = hdl->buckets[bucket];
hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
unlock:
mutex_unlock(hdl->lock);
@@ -2133,6 +2243,12 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_U32:
elem_size = sizeof(u32);
break;
+ case V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params);
+ break;
+ case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
+ elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization);
+ break;
default:
if (type < V4L2_CTRL_COMPOUND_TYPES)
elem_size = sizeof(s32);
@@ -2220,7 +2336,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
ctrl->type_ops->init(ctrl, idx, ctrl->p_new);
}
- if (handler_new_ref(hdl, ctrl)) {
+ if (handler_new_ref(hdl, ctrl, NULL, false, false)) {
kvfree(ctrl);
return NULL;
}
@@ -2389,7 +2505,8 @@ EXPORT_SYMBOL(v4l2_ctrl_new_int_menu);
/* Add the controls from another handler to our own. */
int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_handler *add,
- bool (*filter)(const struct v4l2_ctrl *ctrl))
+ bool (*filter)(const struct v4l2_ctrl *ctrl),
+ bool from_other_dev)
{
struct v4l2_ctrl_ref *ref;
int ret = 0;
@@ -2412,7 +2529,7 @@ int v4l2_ctrl_add_handler(struct v4l2_ctrl_handler *hdl,
/* Filter any unwanted controls */
if (filter && !filter(ctrl))
continue;
- ret = handler_new_ref(hdl, ctrl);
+ ret = handler_new_ref(hdl, ctrl, NULL, from_other_dev, false);
if (ret)
break;
}
@@ -2815,6 +2932,148 @@ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm)
}
EXPORT_SYMBOL(v4l2_querymenu);
+static int v4l2_ctrl_request_clone(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_handler *from)
+{
+ struct v4l2_ctrl_ref *ref;
+ int err = 0;
+
+ if (WARN_ON(!hdl || hdl == from))
+ return -EINVAL;
+
+ if (hdl->error)
+ return hdl->error;
+
+ WARN_ON(hdl->lock != &hdl->_lock);
+
+ mutex_lock(from->lock);
+ list_for_each_entry(ref, &from->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl_ref *new_ref;
+
+ /* Skip refs inherited from other devices */
+ if (ref->from_other_dev)
+ continue;
+ /* And buttons */
+ if (ctrl->type == V4L2_CTRL_TYPE_BUTTON)
+ continue;
+ err = handler_new_ref(hdl, ctrl, &new_ref, false, true);
+ if (err)
+ break;
+ }
+ mutex_unlock(from->lock);
+ return err;
+}
+
+static void v4l2_ctrl_request_queue(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ struct v4l2_ctrl_handler *main_hdl = obj->priv;
+ struct v4l2_ctrl_handler *prev_hdl = NULL;
+ struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
+
+ if (list_empty(&main_hdl->requests_queued))
+ goto queue;
+
+ prev_hdl = list_last_entry(&main_hdl->requests_queued,
+ struct v4l2_ctrl_handler, requests_queued);
+ /*
+ * Note: prev_hdl and hdl must contain the same list of control
+ * references, so if any differences are detected then that is a
+ * driver bug and the WARN_ON is triggered.
+ */
+ mutex_lock(prev_hdl->lock);
+ ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
+ struct v4l2_ctrl_ref, node);
+ list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
+ if (ref_ctrl->req)
+ continue;
+ while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
+ /* Should never happen, but just in case... */
+ if (list_is_last(&ref_ctrl_prev->node,
+ &prev_hdl->ctrl_refs))
+ break;
+ ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
+ }
+ if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
+ break;
+ ref_ctrl->req = ref_ctrl_prev->req;
+ }
+ mutex_unlock(prev_hdl->lock);
+queue:
+ list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
+ hdl->request_is_queued = true;
+}
+
+static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_del_init(&hdl->requests);
+ if (hdl->request_is_queued) {
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ }
+}
+
+static void v4l2_ctrl_request_release(struct media_request_object *obj)
+{
+ struct v4l2_ctrl_handler *hdl =
+ container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(hdl);
+}
+
+static const struct media_request_object_ops req_ops = {
+ .queue = v4l2_ctrl_request_queue,
+ .unbind = v4l2_ctrl_request_unbind,
+ .release = v4l2_ctrl_request_release,
+};
+
+struct v4l2_ctrl_handler *v4l2_ctrl_request_hdl_find(struct media_request *req,
+ struct v4l2_ctrl_handler *parent)
+{
+ struct media_request_object *obj;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_VALIDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return NULL;
+
+ obj = media_request_object_find(req, &req_ops, parent);
+ if (obj)
+ return container_of(obj, struct v4l2_ctrl_handler, req_obj);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_find);
+
+struct v4l2_ctrl *
+v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
+{
+ struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
+
+ return (ref && ref->req == ref) ? ref->ctrl : NULL;
+}
+EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);
+
+static int v4l2_ctrl_request_bind(struct media_request *req,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ctrl_handler *from)
+{
+ int ret;
+
+ ret = v4l2_ctrl_request_clone(hdl, from);
+
+ if (!ret) {
+ ret = media_request_object_bind(req, &req_ops,
+ from, false, &hdl->req_obj);
+ if (!ret)
+ list_add_tail(&hdl->requests, &from->requests);
+ }
+ return ret;
+}
/* Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS:
@@ -2876,6 +3135,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
if (cs->which &&
cs->which != V4L2_CTRL_WHICH_DEF_VAL &&
+ cs->which != V4L2_CTRL_WHICH_REQUEST_VAL &&
V4L2_CTRL_ID2WHICH(id) != cs->which)
return -EINVAL;
@@ -2886,6 +3146,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
ref = find_ref_lock(hdl, id);
if (ref == NULL)
return -EINVAL;
+ h->ref = ref;
ctrl = ref->ctrl;
if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED)
return -EINVAL;
@@ -2908,7 +3169,6 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
}
/* Store the ref to the master control of the cluster */
h->mref = ref;
- h->ctrl = ctrl;
/* Initially set next to 0, meaning that there is no other
control in this helper array belonging to the same
cluster */
@@ -2955,15 +3215,15 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
whether there are any controls at all. */
static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
{
- if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL)
+ if (which == 0 || which == V4L2_CTRL_WHICH_DEF_VAL ||
+ which == V4L2_CTRL_WHICH_REQUEST_VAL)
return 0;
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
-
-
/* Get extended controls. Allocates the helpers array if needed. */
-int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -2993,7 +3253,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
cs->error_idx = cs->count;
for (i = 0; !ret && i < cs->count; i++)
- if (helpers[i].ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
+ if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY)
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
@@ -3027,8 +3287,12 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
u32 idx = i;
do {
- ret = ctrl_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ if (helpers[idx].ref->req)
+ ret = req_to_user(cs->controls + idx,
+ helpers[idx].ref->req);
+ else
+ ret = ctrl_to_user(cs->controls + idx,
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3039,6 +3303,91 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
kvfree(helpers);
return ret;
}
+
+static struct media_request_object *
+v4l2_ctrls_find_req_obj(struct v4l2_ctrl_handler *hdl,
+ struct media_request *req, bool set)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *new_hdl;
+ int ret;
+
+ if (IS_ERR(req))
+ return ERR_CAST(req);
+
+ if (set && WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING))
+ return ERR_PTR(-EBUSY);
+
+ obj = media_request_object_find(req, &req_ops, hdl);
+ if (obj)
+ return obj;
+ if (!set)
+ return ERR_PTR(-ENOENT);
+
+ new_hdl = kzalloc(sizeof(*new_hdl), GFP_KERNEL);
+ if (!new_hdl)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &new_hdl->req_obj;
+ ret = v4l2_ctrl_handler_init(new_hdl, (hdl->nr_of_buckets - 1) * 8);
+ if (!ret)
+ ret = v4l2_ctrl_request_bind(req, new_hdl, hdl);
+ if (ret) {
+ kfree(new_hdl);
+
+ return ERR_PTR(ret);
+ }
+
+ media_request_object_get(obj);
+ return obj;
+}
+
+int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ media_request_put(req);
+ return -EACCES;
+ }
+
+ ret = media_request_lock_for_access(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, false);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_access(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = v4l2_g_ext_ctrls_common(hdl, cs);
+
+ if (obj) {
+ media_request_unlock_for_access(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+ return ret;
+}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
/* Helper function to get a single control */
@@ -3180,7 +3529,7 @@ static int validate_ctrls(struct v4l2_ext_controls *cs,
cs->error_idx = cs->count;
for (i = 0; i < cs->count; i++) {
- struct v4l2_ctrl *ctrl = helpers[i].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl;
union v4l2_ctrl_ptr p_new;
cs->error_idx = i;
@@ -3227,9 +3576,9 @@ static void update_from_auto_cluster(struct v4l2_ctrl *master)
}
/* Try or try-and-set controls */
-static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs,
- bool set)
+static int try_set_ext_ctrls_common(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl,
+ struct v4l2_ext_controls *cs, bool set)
{
struct v4l2_ctrl_helper helper[4];
struct v4l2_ctrl_helper *helpers = helper;
@@ -3292,7 +3641,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
do {
/* Check if the auto control is part of the
list, and remember the new value. */
- if (helpers[tmp_idx].ctrl == master)
+ if (helpers[tmp_idx].ref->ctrl == master)
new_auto_val = cs->controls[tmp_idx].value;
tmp_idx = helpers[tmp_idx].next;
} while (tmp_idx);
@@ -3305,7 +3654,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
/* Copy the new caller-supplied control values.
user_to_new() sets 'is_new' to 1. */
do {
- struct v4l2_ctrl *ctrl = helpers[idx].ctrl;
+ struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl;
ret = user_to_new(cs->controls + idx, ctrl);
if (!ret && ctrl->is_ptr)
@@ -3314,14 +3663,23 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
} while (!ret && idx);
if (!ret)
- ret = try_or_set_cluster(fh, master, set, 0);
+ ret = try_or_set_cluster(fh, master,
+ !hdl->req_obj.req && set, 0);
+ if (!ret && hdl->req_obj.req && set) {
+ for (j = 0; j < master->ncontrols; j++) {
+ struct v4l2_ctrl_ref *ref =
+ find_ref(hdl, master->cluster[j]->id);
+
+ new_to_req(ref);
+ }
+ }
/* Copy the new values back to userspace. */
if (!ret) {
idx = i;
do {
ret = new_to_user(cs->controls + idx,
- helpers[idx].ctrl);
+ helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
}
@@ -3333,16 +3691,60 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return ret;
}
-int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs)
+static int try_set_ext_ctrls(struct v4l2_fh *fh,
+ struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs, bool set)
+{
+ struct media_request_object *obj = NULL;
+ struct media_request *req = NULL;
+ int ret;
+
+ if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) {
+ if (!mdev || cs->request_fd < 0)
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, cs->request_fd);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = media_request_lock_for_update(req);
+ if (ret) {
+ media_request_put(req);
+ return ret;
+ }
+
+ obj = v4l2_ctrls_find_req_obj(hdl, req, set);
+ if (IS_ERR(obj)) {
+ media_request_unlock_for_update(req);
+ media_request_put(req);
+ return PTR_ERR(obj);
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler,
+ req_obj);
+ }
+
+ ret = try_set_ext_ctrls_common(fh, hdl, cs, set);
+
+ if (obj) {
+ media_request_unlock_for_update(req);
+ media_request_object_put(obj);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct media_device *mdev,
+ struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(NULL, hdl, cs, false);
+ return try_set_ext_ctrls(NULL, hdl, mdev, cs, false);
}
EXPORT_SYMBOL(v4l2_try_ext_ctrls);
int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
- struct v4l2_ext_controls *cs)
+ struct media_device *mdev, struct v4l2_ext_controls *cs)
{
- return try_set_ext_ctrls(fh, hdl, cs, true);
+ return try_set_ext_ctrls(fh, hdl, mdev, cs, true);
}
EXPORT_SYMBOL(v4l2_s_ext_ctrls);
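As a hedged illustration of the userspace side of this path (the file descriptors, the chosen control and the error handling below are assumptions for the sketch, not part of this series): a request fd comes from MEDIA_IOC_REQUEST_ALLOC on the media device, control values are stored in it via VIDIOC_S_EXT_CTRLS with which set to V4L2_CTRL_WHICH_REQUEST_VAL, and once the request completes the same request_fd can be passed to VIDIOC_G_EXT_CTRLS to read back the values the driver applied (the v4l2_g_ext_ctrls() path above).

	/*
	 * Userspace sketch only: media_fd/video_fd and V4L2_CID_BRIGHTNESS are
	 * illustrative assumptions; any control exposed by the device works.
	 */
	#include <sys/ioctl.h>
	#include <linux/media.h>
	#include <linux/videodev2.h>

	static int set_ctrl_in_request(int media_fd, int video_fd)
	{
		struct v4l2_ext_control ctrl = {
			.id = V4L2_CID_BRIGHTNESS,
			.value = 128,
		};
		struct v4l2_ext_controls ctrls = {
			.which = V4L2_CTRL_WHICH_REQUEST_VAL,
			.count = 1,
			.controls = &ctrl,
		};
		int req_fd;

		/* Allocate a request; req_fd now refers to it. */
		if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
			return -1;

		/* Store the value in the request instead of applying it now. */
		ctrls.request_fd = req_fd;
		if (ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
			return -1;

		/*
		 * The associated buffer is queued with V4L2_BUF_FLAG_REQUEST_FD
		 * and buf.request_fd = req_fd, then the request itself is queued.
		 */
		return ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
	}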
@@ -3441,6 +3843,162 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
}
EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string);
+void v4l2_ctrl_request_complete(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ unsigned int i;
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
+ ref->req = ref;
+
+ v4l2_ctrl_lock(master);
+ /* g_volatile_ctrl will update the current control values */
+ for (i = 0; i < master->ncontrols; i++)
+ cur_to_new(master->cluster[i]);
+ call_op(master, g_volatile_ctrl);
+ new_to_req(ref);
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+ if (ref->req == ref)
+ continue;
+
+ v4l2_ctrl_lock(ctrl);
+ if (ref->req)
+ ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
+ else
+ ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
+ v4l2_ctrl_unlock(ctrl);
+ }
+
+ WARN_ON(!hdl->request_is_queued);
+ list_del_init(&hdl->requests_queued);
+ hdl->request_is_queued = false;
+ media_request_object_complete(obj);
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_complete);
+
+void v4l2_ctrl_request_setup(struct media_request *req,
+ struct v4l2_ctrl_handler *main_hdl)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_ctrl_ref *ref;
+
+ if (!req || !main_hdl)
+ return;
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ return;
+
+ /*
+ * Note that it is valid if nothing was found. It means
+ * that this request doesn't have any controls and so just
+ * wants to leave the controls unchanged.
+ */
+ obj = media_request_object_find(req, &req_ops, main_hdl);
+ if (!obj)
+ return;
+ if (obj->completed) {
+ media_request_object_put(obj);
+ return;
+ }
+ hdl = container_of(obj, struct v4l2_ctrl_handler, req_obj);
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node)
+ ref->req_done = false;
+
+ list_for_each_entry(ref, &hdl->ctrl_refs, node) {
+ struct v4l2_ctrl *ctrl = ref->ctrl;
+ struct v4l2_ctrl *master = ctrl->cluster[0];
+ bool have_new_data = false;
+ int i;
+
+ /*
+ * Skip if this control was already handled by a cluster.
+ * Skip button controls and read-only controls.
+ */
+ if (ref->req_done || ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
+ (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY))
+ continue;
+
+ v4l2_ctrl_lock(master);
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ if (r->req && r == r->req) {
+ have_new_data = true;
+ break;
+ }
+ }
+ }
+ if (!have_new_data) {
+ v4l2_ctrl_unlock(master);
+ continue;
+ }
+
+ for (i = 0; i < master->ncontrols; i++) {
+ if (master->cluster[i]) {
+ struct v4l2_ctrl_ref *r =
+ find_ref(hdl, master->cluster[i]->id);
+
+ req_to_new(r);
+ master->cluster[i]->is_new = 1;
+ r->req_done = true;
+ }
+ }
+ /*
+ * For volatile autoclusters that are currently in auto mode
+ * we need to discover if it will be set to manual mode.
+ * If so, then we have to copy the current volatile values
+ * first since those will become the new manual values (which
+ * may be overwritten by explicit new values from this set
+ * of controls).
+ */
+ if (master->is_auto && master->has_volatiles &&
+ !is_cur_manual(master)) {
+ s32 new_auto_val = *master->p_new.p_s32;
+
+ /*
+ * If the new value == the manual value, then copy
+ * the current volatile values.
+ */
+ if (new_auto_val == master->manual_mode_value)
+ update_from_auto_cluster(master);
+ }
+
+ try_or_set_cluster(NULL, master, true, 0);
+
+ v4l2_ctrl_unlock(master);
+ }
+
+ media_request_object_put(obj);
+}
+EXPORT_SYMBOL(v4l2_ctrl_request_setup);
+
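Taken together, the expected driver-side pattern for these two helpers is to apply the request's controls before programming the hardware and to complete them once the job's control state is final. A minimal fragment with hypothetical foo_* names (not self-contained); the cedrus driver further down in this diff follows the same shape in cedrus_device_run():

	/* Sketch of an m2m device_run() using the request helpers; foo_* are
	 * placeholders and error handling is omitted. */
	static void foo_device_run(void *priv)
	{
		struct foo_ctx *ctx = priv;
		struct vb2_v4l2_buffer *src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		struct media_request *req = src->vb2_buf.req_obj.req;

		if (req)
			v4l2_ctrl_request_setup(req, &ctx->hdl);	/* apply request controls */

		foo_program_hw(ctx);					/* hypothetical hardware setup */

		if (req)
			v4l2_ctrl_request_complete(req, &ctx->hdl);	/* commit final control state */
	}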
void v4l2_ctrl_notify(struct v4l2_ctrl *ctrl, v4l2_ctrl_notify_fnc notify, void *priv)
{
if (ctrl == NULL)
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 69e775930fc4..feb749aaaa42 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -444,8 +444,22 @@ static int v4l2_release(struct inode *inode, struct file *filp)
struct video_device *vdev = video_devdata(filp);
int ret = 0;
- if (vdev->fops->release)
- ret = vdev->fops->release(filp);
+ /*
+ * We need to serialize the release() with queueing new requests.
+ * The release() may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (vdev->fops->release) {
+ if (v4l2_device_supports_requests(vdev->v4l2_dev)) {
+ mutex_lock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ ret = vdev->fops->release(filp);
+ mutex_unlock(&vdev->v4l2_dev->mdev->req_queue_mutex);
+ } else {
+ ret = vdev->fops->release(filp);
+ }
+ }
+
if (vdev->dev_debug & V4L2_DEV_DEBUG_FOP)
dprintk("%s: release\n",
video_device_node_name(vdev));
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index 098562901f25..df0ac38c4050 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -178,7 +178,8 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
/* This just returns 0 if either of the two args is NULL */
- err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+ err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler,
+ NULL, true);
if (err)
goto error_module;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 7de041bae84f..c63746968fa3 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -474,13 +474,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
const struct v4l2_plane *plane;
int i;
- pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, flags=0x%08x, field=%s, sequence=%d, memory=%s",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, request_fd=%d, flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
- prt_names(p->type, v4l2_type_names),
+ prt_names(p->type, v4l2_type_names), p->request_fd,
p->flags, prt_names(p->field, v4l2_field_names),
p->sequence, prt_names(p->memory, v4l2_memory_names));
@@ -590,8 +590,8 @@ static void v4l_print_ext_controls(const void *arg, bool write_only)
const struct v4l2_ext_controls *p = arg;
int i;
- pr_cont("which=0x%x, count=%d, error_idx=%d",
- p->which, p->count, p->error_idx);
+ pr_cont("which=0x%x, count=%d, error_idx=%d, request_fd=%d",
+ p->which, p->count, p->error_idx, p->request_fd);
for (i = 0; i < p->count; i++) {
if (!p->controls[i].size)
pr_cont(", id/val=0x%x/0x%x",
@@ -907,7 +907,7 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
__u32 i;
/* zero the reserved fields */
- c->reserved[0] = c->reserved[1] = 0;
+ c->reserved[0] = 0;
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
@@ -1309,6 +1309,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_H263: descr = "H.263"; break;
case V4L2_PIX_FMT_MPEG1: descr = "MPEG-1 ES"; break;
case V4L2_PIX_FMT_MPEG2: descr = "MPEG-2 ES"; break;
+ case V4L2_PIX_FMT_MPEG2_SLICE: descr = "MPEG-2 Parsed Slice Data"; break;
case V4L2_PIX_FMT_MPEG4: descr = "MPEG-4 part 2 ES"; break;
case V4L2_PIX_FMT_XVID: descr = "Xvid"; break;
case V4L2_PIX_FMT_VC1_ANNEX_G: descr = "VC-1 (SMPTE 412M Annex G)"; break;
@@ -1336,6 +1337,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
@@ -1877,7 +1879,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, memory);
+ CLEAR_AFTER_FIELD(p, capabilities);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -1918,7 +1920,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, format);
+ CLEAR_AFTER_FIELD(create, capabilities);
v4l_sanitize_format(&create->format);
@@ -2109,9 +2111,9 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
@@ -2128,9 +2130,9 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
@@ -2147,9 +2149,9 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
p->error_idx = p->count;
if (vfh && vfh->ctrl_handler)
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (vfd->ctrl_handler)
- return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
@@ -2780,6 +2782,7 @@ static long __video_do_ioctl(struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *vfd = video_devdata(file);
+ struct mutex *req_queue_lock = NULL;
struct mutex *lock; /* ioctl serialization mutex */
const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
bool write_only = false;
@@ -2799,10 +2802,27 @@ static long __video_do_ioctl(struct file *file,
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags))
vfh = file->private_data;
+ /*
+ * We need to serialize streamon/off with queueing new requests.
+ * These ioctls may trigger the cancellation of a streaming
+ * operation, and that should not be mixed with queueing a new
+ * request at the same time.
+ */
+ if (v4l2_device_supports_requests(vfd->v4l2_dev) &&
+ (cmd == VIDIOC_STREAMON || cmd == VIDIOC_STREAMOFF)) {
+ req_queue_lock = &vfd->v4l2_dev->mdev->req_queue_mutex;
+
+ if (mutex_lock_interruptible(req_queue_lock))
+ return -ERESTARTSYS;
+ }
+
lock = v4l2_ioctl_get_lock(vfd, vfh, cmd, arg);
- if (lock && mutex_lock_interruptible(lock))
+ if (lock && mutex_lock_interruptible(lock)) {
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return -ERESTARTSYS;
+ }
if (!video_is_registered(vfd)) {
ret = -ENODEV;
@@ -2861,6 +2881,8 @@ done:
unlock:
if (lock)
mutex_unlock(lock);
+ if (req_queue_lock)
+ mutex_unlock(req_queue_lock);
return ret;
}
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index ce9bd1b91210..d7806db222d8 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -387,7 +387,7 @@ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
if (m2m_dev->m2m_ops->job_abort)
m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
wait_event(m2m_ctx->finished,
!(m2m_ctx->job_flags & TRANS_RUNNING));
} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
@@ -473,12 +473,19 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_qbuf(vq, buf);
- if (!ret)
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ dprintk("%s: requests cannot be used with capture buffers\n",
+ __func__);
+ return -EPERM;
+ }
+ ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
+ if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
v4l2_m2m_try_schedule(m2m_ctx);
return ret;
@@ -498,15 +505,11 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct v4l2_buffer *buf)
{
+ struct video_device *vdev = video_devdata(file);
struct vb2_queue *vq;
- int ret;
vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
- ret = vb2_prepare_buf(vq, buf);
- if (!ret)
- v4l2_m2m_try_schedule(m2m_ctx);
-
- return ret;
+ return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
@@ -950,6 +953,52 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
+void vb2_m2m_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+ struct v4l2_m2m_ctx *m2m_ctx = NULL;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ struct v4l2_m2m_ctx *m2m_ctx_obj;
+ struct vb2_buffer *vb;
+
+ if (!obj->ops->queue)
+ continue;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ /* Sanity checks */
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
+ m2m_ctx_obj = container_of(vb->vb2_queue,
+ struct v4l2_m2m_ctx,
+ out_q_ctx.q);
+ WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
+ m2m_ctx = m2m_ctx_obj;
+ }
+
+ /*
+ * The buffer we queue here can in theory be immediately
+ * unbound, hence the use of list_for_each_entry_safe()
+ * above and why we call the queue op last.
+ */
+ obj->ops->queue(obj);
+ }
+
+ WARN_ON(!m2m_ctx);
+
+ if (m2m_ctx)
+ v4l2_m2m_try_schedule(m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+
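For other m2m drivers the intended wiring is the same: vb2_m2m_request_queue() becomes the media device's req_queue hook, paired with vb2_request_validate() or a driver wrapper around it (as cedrus_request_validate() does below). A minimal sketch with hypothetical foo_* names:

	/* Wiring sketch; struct foo_dev and its fields are placeholders. */
	static const struct media_device_ops foo_media_ops = {
		.req_validate	= vb2_request_validate,		/* generic buffer checks */
		.req_queue	= vb2_m2m_request_queue,	/* queue objects, kick the m2m job */
	};

	static void foo_setup_media(struct foo_dev *dev)
	{
		dev->mdev.ops = &foo_media_ops;		/* enables request support */
		dev->v4l2_dev.mdev = &dev->mdev;	/* lets V4L2 find the media device */
	}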
/* Videobuf2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 792f41dffe23..f5f0d71ec745 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -222,17 +222,20 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
case VIDIOC_G_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_S_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_TRY_EXT_CTRLS:
if (!vfh->ctrl_handler)
return -ENOTTY;
- return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler,
+ sd->v4l2_dev->mdev, arg);
case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index db5cf67047ad..b3620a8f2d9f 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -31,6 +31,8 @@ source "drivers/staging/media/mt9t031/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/sunxi/Kconfig"
+
source "drivers/staging/media/tegra-vde/Kconfig"
source "drivers/staging/media/zoran/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 503fbe47fa58..42948f805548 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074/
obj-$(CONFIG_SOC_CAMERA_MT9T031) += mt9t031/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
+obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 5e42490331b7..5e9769ea8a50 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1135,10 +1135,6 @@ static int vpfe_buffer_prepare(struct vb2_buffer *vb)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
- if (vb->state != VB2_BUF_STATE_ACTIVE &&
- vb->state != VB2_BUF_STATE_PREPARED)
- return 0;
-
/* Initialize buffer */
vb2_set_plane_payload(vb, 0, video->fmt.fmt.pix.sizeimage);
if (vb2_plane_vaddr(vb, 0) &&
@@ -1429,7 +1425,8 @@ static int vpfe_qbuf(struct file *file, void *priv,
return -EACCES;
}
- return vb2_qbuf(&video->buffer_queue, p);
+ return vb2_qbuf(&video->buffer_queue,
+ video->video_dev.v4l2_dev->mdev, p);
}
/*
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 026b9cbe581d..4b344a4a3706 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -350,7 +350,7 @@ static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
sd->ctrl_handler,
- NULL);
+ NULL, true);
if (ret)
return ret;
}
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
index 6df189135db8..8cf773eef9da 100644
--- a/drivers/staging/media/imx/imx-media-fim.c
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -463,7 +463,7 @@ int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
/* add the FIM controls to the calling subdev ctrl handler */
return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
- &fim->ctrl_handler, NULL);
+ &fim->ctrl_handler, NULL, false);
}
EXPORT_SYMBOL_GPL(imx_media_fim_add_controls);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index c1322aeaf01e..c2c5a9cd8642 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -802,9 +802,10 @@ iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
static int
iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
+ struct iss_video *video = video_drvdata(file);
struct iss_video_fh *vfh = to_iss_video_fh(fh);
- return vb2_qbuf(&vfh->queue, b);
+ return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
}
static int
diff --git a/drivers/staging/media/sunxi/Kconfig b/drivers/staging/media/sunxi/Kconfig
new file mode 100644
index 000000000000..c78d92240ceb
--- /dev/null
+++ b/drivers/staging/media/sunxi/Kconfig
@@ -0,0 +1,15 @@
+config VIDEO_SUNXI
+ bool "Allwinner sunXi family Video Devices"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ If you have an Allwinner SoC based on the sunXi family, say Y.
+
+ Note that this option doesn't include new drivers in the
+ kernel: saying N will just cause Kconfig to skip all the
+ questions about Allwinner media devices.
+
+if VIDEO_SUNXI
+
+source "drivers/staging/media/sunxi/cedrus/Kconfig"
+
+endif
diff --git a/drivers/staging/media/sunxi/Makefile b/drivers/staging/media/sunxi/Makefile
new file mode 100644
index 000000000000..cee2846c3ecf
--- /dev/null
+++ b/drivers/staging/media/sunxi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += cedrus/
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
new file mode 100644
index 000000000000..a7a34e89c42d
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_SUNXI_CEDRUS
+ tristate "Allwinner Cedrus VPU driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on HAS_DMA
+ depends on OF
+ select SUNXI_SRAM
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+ Support for the VPU found in Allwinner SoCs, also known as the Cedar
+ video engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunxi-cedrus.
diff --git a/drivers/staging/media/sunxi/cedrus/Makefile b/drivers/staging/media/sunxi/cedrus/Makefile
new file mode 100644
index 000000000000..e9dc68b7bcb6
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDRUS) += sunxi-cedrus.o
+
+sunxi-cedrus-y = cedrus.o cedrus_video.o cedrus_hw.o cedrus_dec.o cedrus_mpeg2.o
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
new file mode 100644
index 000000000000..ec277ece47af
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -0,0 +1,7 @@
+Before this stateless decoder driver can leave the staging area:
+* The Request API needs to be stabilized;
+* The codec-specific controls need to be thoroughly reviewed to ensure they
+ cover all intended use cases;
+* Userspace support for the Request API needs to be reviewed;
+* Another stateless decoder driver should be submitted;
+* At least one stateless encoder driver should be submitted.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
new file mode 100644
index 000000000000..82558455384a
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+static const struct cedrus_control cedrus_controls[] = {
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS,
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_slice_params),
+ .codec = CEDRUS_CODEC_MPEG2,
+ .required = true,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION,
+ .elem_size = sizeof(struct v4l2_ctrl_mpeg2_quantization),
+ .codec = CEDRUS_CODEC_MPEG2,
+ .required = false,
+ },
+};
+
+#define CEDRUS_CONTROLS_COUNT ARRAY_SIZE(cedrus_controls)
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id)
+{
+ unsigned int i;
+
+ for (i = 0; ctx->ctrls[i]; i++)
+ if (ctx->ctrls[i]->id == id)
+ return ctx->ctrls[i]->p_cur.p;
+
+ return NULL;
+}
+
+static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
+{
+ struct v4l2_ctrl_handler *hdl = &ctx->hdl;
+ struct v4l2_ctrl *ctrl;
+ unsigned int ctrl_size;
+ unsigned int i;
+
+ v4l2_ctrl_handler_init(hdl, CEDRUS_CONTROLS_COUNT);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize control handler\n");
+ return hdl->error;
+ }
+
+ /* One extra NULL entry terminates the array for cedrus_find_control_data(). */
+ ctrl_size = sizeof(ctrl) * (CEDRUS_CONTROLS_COUNT + 1);
+
+ ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL);
+ if (!ctx->ctrls) {
+ v4l2_ctrl_handler_free(hdl);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+ struct v4l2_ctrl_config cfg = { 0 };
+
+ cfg.elem_size = cedrus_controls[i].elem_size;
+ cfg.id = cedrus_controls[i].id;
+
+ ctrl = v4l2_ctrl_new_custom(hdl, &cfg, NULL);
+ if (hdl->error) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to create new custom control\n");
+
+ v4l2_ctrl_handler_free(hdl);
+ kfree(ctx->ctrls);
+ return hdl->error;
+ }
+
+ ctx->ctrls[i] = ctrl;
+ }
+
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
+
+ return 0;
+}
+
+static int cedrus_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ struct v4l2_ctrl_handler *parent_hdl, *hdl;
+ struct cedrus_ctx *ctx = NULL;
+ struct v4l2_ctrl *ctrl_test;
+ unsigned int count;
+ unsigned int i;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ struct vb2_buffer *vb;
+
+ if (vb2_request_object_is_buffer(obj)) {
+ vb = container_of(obj, struct vb2_buffer, req_obj);
+ ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ break;
+ }
+ }
+
+ if (!ctx)
+ return -ENOENT;
+
+ /* Only log through ctx once it is known to be valid. */
+ count = vb2_request_buffer_cnt(req);
+ if (!count) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "No buffer was provided with the request\n");
+ return -ENOENT;
+ } else if (count > 1) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "More than one buffer was provided with the request\n");
+ return -EINVAL;
+ }
+
+ parent_hdl = &ctx->hdl;
+
+ hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
+ if (!hdl) {
+ v4l2_info(&ctx->dev->v4l2_dev, "Missing codec control(s)\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
+ if (cedrus_controls[i].codec != ctx->current_codec ||
+ !cedrus_controls[i].required)
+ continue;
+
+ ctrl_test = v4l2_ctrl_request_hdl_ctrl_find(hdl,
+ cedrus_controls[i].id);
+ if (!ctrl_test) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "Missing required codec control\n");
+ return -ENOENT;
+ }
+ }
+
+ v4l2_ctrl_request_hdl_put(hdl);
+
+ return vb2_request_validate(req);
+}
+
+static int cedrus_open(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = NULL;
+ int ret;
+
+ if (mutex_lock_interruptible(&dev->dev_mutex))
+ return -ERESTARTSYS;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ mutex_unlock(&dev->dev_mutex);
+ return -ENOMEM;
+ }
+
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dev = dev;
+
+ ret = cedrus_init_ctrls(dev, ctx);
+ if (ret)
+ goto err_free;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
+ &cedrus_queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctrls;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+
+err_ctrls:
+ v4l2_ctrl_handler_free(&ctx->hdl);
+err_free:
+ kfree(ctx);
+ mutex_unlock(&dev->dev_mutex);
+
+ return ret;
+}
+
+static int cedrus_release(struct file *file)
+{
+ struct cedrus_dev *dev = video_drvdata(file);
+ struct cedrus_ctx *ctx = container_of(file->private_data,
+ struct cedrus_ctx, fh);
+
+ mutex_lock(&dev->dev_mutex);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ kfree(ctx->ctrls);
+
+ v4l2_fh_exit(&ctx->fh);
+
+ kfree(ctx);
+
+ mutex_unlock(&dev->dev_mutex);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations cedrus_fops = {
+ .owner = THIS_MODULE,
+ .open = cedrus_open,
+ .release = cedrus_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct video_device cedrus_video_device = {
+ .name = CEDRUS_NAME,
+ .vfl_dir = VFL_DIR_M2M,
+ .fops = &cedrus_fops,
+ .ioctl_ops = &cedrus_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
+};
+
+static const struct v4l2_m2m_ops cedrus_m2m_ops = {
+ .device_run = cedrus_device_run,
+};
+
+static const struct media_device_ops cedrus_m2m_media_ops = {
+ .req_validate = cedrus_request_validate,
+ .req_queue = vb2_m2m_request_queue,
+};
+
+static int cedrus_probe(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev;
+ struct video_device *vfd;
+ int ret;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->vfd = cedrus_video_device;
+ dev->dev = &pdev->dev;
+ dev->pdev = pdev;
+
+ ret = cedrus_hw_probe(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to probe hardware\n");
+ return ret;
+ }
+
+ dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
+
+ mutex_init(&dev->dev_mutex);
+ spin_lock_init(&dev->irq_lock);
+
+ ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register V4L2 device\n");
+ return ret;
+ }
+
+ vfd = &dev->vfd;
+ vfd->lock = &dev->dev_mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+
+ snprintf(vfd->name, sizeof(vfd->name), "%s", cedrus_video_device.name);
+ video_set_drvdata(vfd, dev);
+
+ dev->m2m_dev = v4l2_m2m_init(&cedrus_m2m_ops);
+ if (IS_ERR(dev->m2m_dev)) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M device\n");
+ ret = PTR_ERR(dev->m2m_dev);
+
+ goto err_video;
+ }
+
+ dev->mdev.dev = &pdev->dev;
+ strscpy(dev->mdev.model, CEDRUS_NAME, sizeof(dev->mdev.model));
+
+ media_device_init(&dev->mdev);
+ dev->mdev.ops = &cedrus_m2m_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev,
+ "Failed to initialize V4L2 M2M media controller\n");
+ goto err_m2m;
+ }
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
+ goto err_v4l2;
+ }
+
+ v4l2_info(&dev->v4l2_dev,
+ "Device registered as /dev/video%d\n", vfd->num);
+
+ ret = media_device_register(&dev->mdev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to register media device\n");
+ goto err_m2m_mc;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+ return 0;
+
+err_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+err_m2m:
+ v4l2_m2m_release(dev->m2m_dev);
+err_video:
+ video_unregister_device(&dev->vfd);
+err_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ return ret;
+}
+
+static int cedrus_remove(struct platform_device *pdev)
+{
+ struct cedrus_dev *dev = platform_get_drvdata(pdev);
+
+ if (media_devnode_is_registered(dev->mdev.devnode)) {
+ media_device_unregister(&dev->mdev);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+ media_device_cleanup(&dev->mdev);
+ }
+
+ v4l2_m2m_release(dev->m2m_dev);
+ video_unregister_device(&dev->vfd);
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ cedrus_hw_remove(dev);
+
+ return 0;
+}
+
+static const struct cedrus_variant sun4i_a10_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun5i_a13_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun7i_a20_cedrus_variant = {
+ /* No particular capability. */
+};
+
+static const struct cedrus_variant sun8i_a33_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
+static const struct cedrus_variant sun8i_h3_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
+static const struct of_device_id cedrus_dt_match[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-video-engine",
+ .data = &sun4i_a10_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun5i-a13-video-engine",
+ .data = &sun5i_a13_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-video-engine",
+ .data = &sun7i_a20_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-a33-video-engine",
+ .data = &sun8i_a33_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-video-engine",
+ .data = &sun8i_h3_cedrus_variant,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cedrus_dt_match);
+
+static struct platform_driver cedrus_driver = {
+ .probe = cedrus_probe,
+ .remove = cedrus_remove,
+ .driver = {
+ .name = CEDRUS_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cedrus_dt_match),
+ },
+};
+module_platform_driver(cedrus_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Florent Revest <florent.revest@free-electrons.com>");
+MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_DESCRIPTION("Cedrus VPU driver");
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
new file mode 100644
index 000000000000..3f61248c57ac
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_H_
+#define _CEDRUS_H_
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <linux/platform_device.h>
+
+#define CEDRUS_NAME "cedrus"
+
+#define CEDRUS_CAPABILITY_UNTILED BIT(0)
+
+enum cedrus_codec {
+ CEDRUS_CODEC_MPEG2,
+
+ CEDRUS_CODEC_LAST,
+};
+
+enum cedrus_irq_status {
+ CEDRUS_IRQ_NONE,
+ CEDRUS_IRQ_ERROR,
+ CEDRUS_IRQ_OK,
+};
+
+struct cedrus_control {
+ u32 id;
+ u32 elem_size;
+ enum cedrus_codec codec;
+ unsigned char required:1;
+};
+
+struct cedrus_mpeg2_run {
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_ctrl_mpeg2_quantization *quantization;
+};
+
+struct cedrus_run {
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+
+ union {
+ struct cedrus_mpeg2_run mpeg2;
+ };
+};
+
+struct cedrus_buffer {
+ struct v4l2_m2m_buffer m2m_buf;
+};
+
+struct cedrus_ctx {
+ struct v4l2_fh fh;
+ struct cedrus_dev *dev;
+
+ struct v4l2_pix_format src_fmt;
+ struct v4l2_pix_format dst_fmt;
+ enum cedrus_codec current_codec;
+
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl **ctrls;
+
+ struct vb2_buffer *dst_bufs[VIDEO_MAX_FRAME];
+};
+
+struct cedrus_dec_ops {
+ void (*irq_clear)(struct cedrus_ctx *ctx);
+ void (*irq_disable)(struct cedrus_ctx *ctx);
+ enum cedrus_irq_status (*irq_status)(struct cedrus_ctx *ctx);
+ void (*setup)(struct cedrus_ctx *ctx, struct cedrus_run *run);
+ int (*start)(struct cedrus_ctx *ctx);
+ void (*stop)(struct cedrus_ctx *ctx);
+ void (*trigger)(struct cedrus_ctx *ctx);
+};
+
+struct cedrus_variant {
+ unsigned int capabilities;
+};
+
+struct cedrus_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct media_device mdev;
+ struct media_pad pad[2];
+ struct platform_device *pdev;
+ struct device *dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct cedrus_dec_ops *dec_ops[CEDRUS_CODEC_LAST];
+
+ /* Device file mutex */
+ struct mutex dev_mutex;
+ /* Interrupt spinlock */
+ spinlock_t irq_lock;
+
+ void __iomem *base;
+
+ struct clk *mod_clk;
+ struct clk *ahb_clk;
+ struct clk *ram_clk;
+
+ struct reset_control *rstc;
+
+ unsigned int capabilities;
+};
+
+extern struct cedrus_dec_ops cedrus_dec_ops_mpeg2;
+
+static inline void cedrus_write(struct cedrus_dev *dev, u32 reg, u32 val)
+{
+ writel(val, dev->base + reg);
+}
+
+static inline u32 cedrus_read(struct cedrus_dev *dev, u32 reg)
+{
+ return readl(dev->base + reg);
+}
+
+static inline dma_addr_t cedrus_buf_addr(struct vb2_buffer *buf,
+ struct v4l2_pix_format *pix_fmt,
+ unsigned int plane)
+{
+ dma_addr_t addr = vb2_dma_contig_plane_dma_addr(buf, 0);
+
+ return addr + (pix_fmt ? (dma_addr_t)pix_fmt->bytesperline *
+ pix_fmt->height * plane : 0);
+}
+
+static inline dma_addr_t cedrus_dst_buf_addr(struct cedrus_ctx *ctx,
+ unsigned int index,
+ unsigned int plane)
+{
+ struct vb2_buffer *buf = ctx->dst_bufs[index];
+
+ return buf ? cedrus_buf_addr(buf, &ctx->dst_fmt, plane) : 0;
+}
+
+static inline struct cedrus_buffer *
+vb2_v4l2_to_cedrus_buffer(const struct vb2_v4l2_buffer *p)
+{
+ return container_of(p, struct cedrus_buffer, m2m_buf.vb);
+}
+
+static inline struct cedrus_buffer *
+vb2_to_cedrus_buffer(const struct vb2_buffer *p)
+{
+ return vb2_v4l2_to_cedrus_buffer(to_vb2_v4l2_buffer(p));
+}
+
+void *cedrus_find_control_data(struct cedrus_ctx *ctx, u32 id);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
new file mode 100644
index 000000000000..e40180a33951
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+void cedrus_device_run(void *priv)
+{
+ struct cedrus_ctx *ctx = priv;
+ struct cedrus_dev *dev = ctx->dev;
+ struct cedrus_run run = { 0 };
+ struct media_request *src_req;
+ unsigned long flags;
+
+ run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ /* Apply request(s) controls if needed. */
+ src_req = run.src->vb2_buf.req_obj.req;
+
+ if (src_req)
+ v4l2_ctrl_request_setup(src_req, &ctx->hdl);
+
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ run.mpeg2.slice_params = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS);
+ run.mpeg2.quantization = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION);
+ break;
+
+ default:
+ break;
+ }
+
+ dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+
+ /* Complete request(s) controls if needed. */
+
+ if (src_req)
+ v4l2_ctrl_request_complete(src_req, &ctx->hdl);
+
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
+ dev->dec_ops[ctx->current_codec]->trigger(ctx);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.h b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
new file mode 100644
index 000000000000..4f423d3a1cad
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_DEC_H_
+#define _CEDRUS_DEC_H_
+
+extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
+
+void cedrus_device_work(struct work_struct *work);
+void cedrus_device_run(void *priv);
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
new file mode 100644
index 000000000000..32adbcbe6175
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
+{
+ u32 reg = 0;
+
+ /*
+ * FIXME: This is only valid on 32-bit DDR; we should test
+ * it on the A13/A33.
+ */
+ reg |= VE_MODE_REC_WR_MODE_2MB;
+ reg |= VE_MODE_DDR_MODE_BW_128;
+
+ switch (codec) {
+ case CEDRUS_CODEC_MPEG2:
+ reg |= VE_MODE_DEC_MPEG;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ cedrus_write(dev, VE_MODE, reg);
+
+ return 0;
+}
+
+void cedrus_engine_disable(struct cedrus_dev *dev)
+{
+ cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
+}
+
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt)
+{
+ unsigned int width = fmt->width;
+ unsigned int height = fmt->height;
+ u32 chroma_size;
+ u32 reg;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_NV12:
+ chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
+
+ reg = VE_PRIMARY_OUT_FMT_NV12;
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = VE_CHROMA_BUF_LEN_SDRT(chroma_size / 2);
+ cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
+
+ reg = chroma_size / 2;
+ cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
+
+ reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
+ VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
+ cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
+
+ break;
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ default:
+ reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
+
+ reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
+ cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
+
+ break;
+ }
+}
+
+static irqreturn_t cedrus_bh(int irq, void *data)
+{
+ struct cedrus_dev *dev = data;
+ struct cedrus_ctx *ctx;
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ return IRQ_HANDLED;
+ }
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t cedrus_irq(int irq, void *data)
+{
+ struct cedrus_dev *dev = data;
+ struct cedrus_ctx *ctx;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ enum vb2_buffer_state state;
+ enum cedrus_irq_status status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (!ctx) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_NONE;
+ }
+
+ status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
+ if (status == CEDRUS_IRQ_NONE) {
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ return IRQ_NONE;
+ }
+
+ dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
+ dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (!src_buf || !dst_buf) {
+ v4l2_err(&dev->v4l2_dev,
+ "Missing source and/or destination buffers\n");
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_HANDLED;
+ }
+
+ if (status == CEDRUS_IRQ_ERROR)
+ state = VB2_BUF_STATE_ERROR;
+ else
+ state = VB2_BUF_STATE_DONE;
+
+ v4l2_m2m_buf_done(src_buf, state);
+ v4l2_m2m_buf_done(dst_buf, state);
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int cedrus_hw_probe(struct cedrus_dev *dev)
+{
+ const struct cedrus_variant *variant;
+ struct resource *res;
+ int irq_dec;
+ int ret;
+
+ variant = of_device_get_match_data(dev->dev);
+ if (!variant)
+ return -EINVAL;
+
+ dev->capabilities = variant->capabilities;
+
+ irq_dec = platform_get_irq(dev->pdev, 0);
+ if (irq_dec <= 0) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get IRQ\n");
+
+ return irq_dec;
+ }
+ ret = devm_request_threaded_irq(dev->dev, irq_dec, cedrus_irq,
+ cedrus_bh, 0, dev_name(dev->dev),
+ dev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to request IRQ\n");
+
+ return ret;
+ }
+
+ /*
+ * The VPU is only able to handle bus addresses, so we have to subtract
+ * the RAM offset from the physical addresses.
+ *
+ * This information will eventually be obtained from device-tree.
+ */
+
+#ifdef PHYS_PFN_OFFSET
+ dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+#endif
+
+ ret = of_reserved_mem_device_init(dev->dev);
+ if (ret && ret != -ENODEV) {
+ v4l2_err(&dev->v4l2_dev, "Failed to reserve memory\n");
+
+ return ret;
+ }
+
+ ret = sunxi_sram_claim(dev->dev);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to claim SRAM\n");
+
+ goto err_mem;
+ }
+
+ dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
+ if (IS_ERR(dev->ahb_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get AHB clock\n");
+
+ ret = PTR_ERR(dev->ahb_clk);
+ goto err_sram;
+ }
+
+ dev->mod_clk = devm_clk_get(dev->dev, "mod");
+ if (IS_ERR(dev->mod_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get MOD clock\n");
+
+ ret = PTR_ERR(dev->mod_clk);
+ goto err_sram;
+ }
+
+ dev->ram_clk = devm_clk_get(dev->dev, "ram");
+ if (IS_ERR(dev->ram_clk)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get RAM clock\n");
+
+ ret = PTR_ERR(dev->ram_clk);
+ goto err_sram;
+ }
+
+ dev->rstc = devm_reset_control_get(dev->dev, NULL);
+ if (IS_ERR(dev->rstc)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to get reset control\n");
+
+ ret = PTR_ERR(dev->rstc);
+ goto err_sram;
+ }
+
+ res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
+ dev->base = devm_ioremap_resource(dev->dev, res);
+ if (IS_ERR(dev->base)) {
+ v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
+
+ ret = PTR_ERR(dev->base);
+ goto err_sram;
+ }
+
+ ret = clk_set_rate(dev->mod_clk, CEDRUS_CLOCK_RATE_DEFAULT);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to set clock rate\n");
+
+ goto err_sram;
+ }
+
+ ret = clk_prepare_enable(dev->ahb_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable AHB clock\n");
+
+ goto err_sram;
+ }
+
+ ret = clk_prepare_enable(dev->mod_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable MOD clock\n");
+
+ goto err_ahb_clk;
+ }
+
+ ret = clk_prepare_enable(dev->ram_clk);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to enable RAM clock\n");
+
+ goto err_mod_clk;
+ }
+
+ ret = reset_control_reset(dev->rstc);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to apply reset\n");
+
+ goto err_ram_clk;
+ }
+
+ return 0;
+
+err_ram_clk:
+ clk_disable_unprepare(dev->ram_clk);
+err_mod_clk:
+ clk_disable_unprepare(dev->mod_clk);
+err_ahb_clk:
+ clk_disable_unprepare(dev->ahb_clk);
+err_sram:
+ sunxi_sram_release(dev->dev);
+err_mem:
+ of_reserved_mem_device_release(dev->dev);
+
+ return ret;
+}
+
+void cedrus_hw_remove(struct cedrus_dev *dev)
+{
+ reset_control_assert(dev->rstc);
+
+ clk_disable_unprepare(dev->ram_clk);
+ clk_disable_unprepare(dev->mod_clk);
+ clk_disable_unprepare(dev->ahb_clk);
+
+ sunxi_sram_release(dev->dev);
+
+ of_reserved_mem_device_release(dev->dev);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.h b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
new file mode 100644
index 000000000000..b43c77d54b95
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_HW_H_
+#define _CEDRUS_HW_H_
+
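+/* Default rate of the VPU module clock (mod_clk), in Hz. */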
+#define CEDRUS_CLOCK_RATE_DEFAULT 320000000
+
+int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec);
+void cedrus_engine_disable(struct cedrus_dev *dev);
+
+void cedrus_dst_format_set(struct cedrus_dev *dev,
+ struct v4l2_pix_format *fmt);
+
+int cedrus_hw_probe(struct cedrus_dev *dev);
+void cedrus_hw_remove(struct cedrus_dev *dev);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
new file mode 100644
index 000000000000..9abd39cae38c
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_mpeg2.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ */
+
+#include <media/videobuf2-dma-contig.h>
+
+#include "cedrus.h"
+#include "cedrus_hw.h"
+#include "cedrus_regs.h"
+
+/* Default MPEG-2 quantization coefficients, from the specification. */
+
+static const u8 intra_quantization_matrix_default[64] = {
+ 8, 16, 16, 19, 16, 19, 22, 22,
+ 22, 22, 22, 22, 26, 24, 26, 27,
+ 27, 27, 26, 26, 26, 26, 27, 27,
+ 27, 29, 29, 29, 34, 34, 34, 29,
+ 29, 29, 27, 27, 29, 29, 32, 32,
+ 34, 34, 37, 38, 37, 35, 35, 34,
+ 35, 38, 38, 40, 40, 40, 48, 48,
+ 46, 46, 56, 56, 58, 69, 69, 83
+};
+
+static const u8 non_intra_quantization_matrix_default[64] = {
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16
+};
+
+static enum cedrus_irq_status cedrus_mpeg2_irq_status(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ reg = cedrus_read(dev, VE_DEC_MPEG_STATUS);
+ reg &= VE_DEC_MPEG_STATUS_CHECK_MASK;
+
+ if (!reg)
+ return CEDRUS_IRQ_NONE;
+
+ if (reg & VE_DEC_MPEG_STATUS_CHECK_ERROR ||
+ !(reg & VE_DEC_MPEG_STATUS_SUCCESS))
+ return CEDRUS_IRQ_ERROR;
+
+ return CEDRUS_IRQ_OK;
+}
+
+static void cedrus_mpeg2_irq_clear(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+
+ cedrus_write(dev, VE_DEC_MPEG_STATUS, VE_DEC_MPEG_STATUS_CHECK_MASK);
+}
+
+static void cedrus_mpeg2_irq_disable(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg = cedrus_read(dev, VE_DEC_MPEG_CTRL);
+
+ reg &= ~VE_DEC_MPEG_CTRL_IRQ_MASK;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+}
+
+static void cedrus_mpeg2_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_mpeg2_slice_params *slice_params;
+ const struct v4l2_mpeg2_sequence *sequence;
+ const struct v4l2_mpeg2_picture *picture;
+ const struct v4l2_ctrl_mpeg2_quantization *quantization;
+ dma_addr_t src_buf_addr, dst_luma_addr, dst_chroma_addr;
+ dma_addr_t fwd_luma_addr, fwd_chroma_addr;
+ dma_addr_t bwd_luma_addr, bwd_chroma_addr;
+ struct cedrus_dev *dev = ctx->dev;
+ const u8 *matrix;
+ unsigned int i;
+ u32 reg;
+
+ slice_params = run->mpeg2.slice_params;
+ sequence = &slice_params->sequence;
+ picture = &slice_params->picture;
+
+ quantization = run->mpeg2.quantization;
+
+ /* Activate MPEG engine. */
+ cedrus_engine_enable(dev, CEDRUS_CODEC_MPEG2);
+
+ /* Set intra quantization matrix. */
+
+ if (quantization && quantization->load_intra_quantiser_matrix)
+ matrix = quantization->intra_quantiser_matrix;
+ else
+ matrix = intra_quantization_matrix_default;
+
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set non-intra quantization matrix. */
+
+ if (quantization && quantization->load_non_intra_quantiser_matrix)
+ matrix = quantization->non_intra_quantiser_matrix;
+ else
+ matrix = non_intra_quantization_matrix_default;
+
+ for (i = 0; i < 64; i++) {
+ reg = VE_DEC_MPEG_IQMINPUT_WEIGHT(i, matrix[i]);
+ reg |= VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA;
+
+ cedrus_write(dev, VE_DEC_MPEG_IQMINPUT, reg);
+ }
+
+ /* Set MPEG picture header. */
+
+ reg = VE_DEC_MPEG_MP12HDR_SLICE_TYPE(picture->picture_coding_type);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 0, picture->f_code[0][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(0, 1, picture->f_code[0][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 0, picture->f_code[1][0]);
+ reg |= VE_DEC_MPEG_MP12HDR_F_CODE(1, 1, picture->f_code[1][1]);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(picture->intra_dc_precision);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(picture->picture_structure);
+ reg |= VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(picture->top_field_first);
+ reg |= VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(picture->frame_pred_frame_dct);
+ reg |= VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(picture->concealment_motion_vectors);
+ reg |= VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(picture->q_scale_type);
+ reg |= VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(picture->intra_vlc_format);
+ reg |= VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(picture->alternate_scan);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(0);
+ reg |= VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(0);
+
+ cedrus_write(dev, VE_DEC_MPEG_MP12HDR, reg);
+
+ /* Set frame dimensions. */
+
+ reg = VE_DEC_MPEG_PICCODEDSIZE_WIDTH(sequence->horizontal_size);
+ reg |= VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(sequence->vertical_size);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICCODEDSIZE, reg);
+
+ reg = VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(ctx->src_fmt.width);
+ reg |= VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(ctx->src_fmt.height);
+
+ cedrus_write(dev, VE_DEC_MPEG_PICBOUNDSIZE, reg);
+
+ /* Forward and backward prediction reference buffers. */
+
+ fwd_luma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->forward_ref_index,
+ 0);
+ fwd_chroma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->forward_ref_index,
+ 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_FWD_REF_LUMA_ADDR, fwd_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_FWD_REF_CHROMA_ADDR, fwd_chroma_addr);
+
+ bwd_luma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->backward_ref_index,
+ 0);
+ bwd_chroma_addr = cedrus_dst_buf_addr(ctx,
+ slice_params->backward_ref_index,
+ 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_BWD_REF_LUMA_ADDR, bwd_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_BWD_REF_CHROMA_ADDR, bwd_chroma_addr);
+
+ /* Destination luma and chroma buffers. */
+
+ dst_luma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 0);
+ dst_chroma_addr = cedrus_dst_buf_addr(ctx, run->dst->vb2_buf.index, 1);
+
+ cedrus_write(dev, VE_DEC_MPEG_REC_LUMA, dst_luma_addr);
+ cedrus_write(dev, VE_DEC_MPEG_REC_CHROMA, dst_chroma_addr);
+
+ /* Source offset and length in bits. */
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_OFFSET,
+ slice_params->data_bit_offset);
+
+ reg = slice_params->bit_size - slice_params->data_bit_offset;
+ cedrus_write(dev, VE_DEC_MPEG_VLD_LEN, reg);
+
+ /* Source beginning and end addresses. */
+
+ src_buf_addr = vb2_dma_contig_plane_dma_addr(&run->src->vb2_buf, 0);
+
+ reg = VE_DEC_MPEG_VLD_ADDR_BASE(src_buf_addr);
+ reg |= VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA;
+ reg |= VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA;
+
+ cedrus_write(dev, VE_DEC_MPEG_VLD_ADDR, reg);
+
+ reg = src_buf_addr + DIV_ROUND_UP(slice_params->bit_size, 8);
+ cedrus_write(dev, VE_DEC_MPEG_VLD_END_ADDR, reg);
+
+ /* Macroblock address: start at the beginning. */
+ reg = VE_DEC_MPEG_MBADDR_Y(0) | VE_DEC_MPEG_MBADDR_X(0);
+ cedrus_write(dev, VE_DEC_MPEG_MBADDR, reg);
+
+ /* Clear previous errors. */
+ cedrus_write(dev, VE_DEC_MPEG_ERROR, 0);
+
+ /* Clear correct macroblocks register. */
+ cedrus_write(dev, VE_DEC_MPEG_CRTMBADDR, 0);
+
+	/* Enable appropriate interrupts and components. */
+
+ reg = VE_DEC_MPEG_CTRL_IRQ_MASK | VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK |
+ VE_DEC_MPEG_CTRL_MC_CACHE_EN;
+
+ cedrus_write(dev, VE_DEC_MPEG_CTRL, reg);
+}
+
+static void cedrus_mpeg2_trigger(struct cedrus_ctx *ctx)
+{
+ struct cedrus_dev *dev = ctx->dev;
+ u32 reg;
+
+ /* Trigger MPEG engine. */
+ reg = VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD | VE_DEC_MPEG_TRIGGER_MPEG2 |
+ VE_DEC_MPEG_TRIGGER_MB_BOUNDARY;
+
+ cedrus_write(dev, VE_DEC_MPEG_TRIGGER, reg);
+}
+
+struct cedrus_dec_ops cedrus_dec_ops_mpeg2 = {
+ .irq_clear = cedrus_mpeg2_irq_clear,
+ .irq_disable = cedrus_mpeg2_irq_disable,
+ .irq_status = cedrus_mpeg2_irq_status,
+ .setup = cedrus_mpeg2_setup,
+ .trigger = cedrus_mpeg2_trigger,
+};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
new file mode 100644
index 000000000000..de2d6b6f64bf
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (c) 2013-2016 Jens Kuske <jenskuske@gmail.com>
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+#ifndef _CEDRUS_REGS_H_
+#define _CEDRUS_REGS_H_
+
+/*
+ * Common acronyms and contractions used in register descriptions:
+ * * VLD: Variable-Length Decoder
+ * * IQ: Inverse Quantization
+ * * IDCT: Inverse Discrete Cosine Transform
+ * * MC: Motion Compensation
+ * * STCD: Start Code Detect
+ * * SDRT: Scale Down and Rotate
+ */
+
+#define VE_ENGINE_DEC_MPEG 0x100
+#define VE_ENGINE_DEC_H264 0x200
+
+#define VE_MODE 0x00
+
+#define VE_MODE_REC_WR_MODE_2MB (0x01 << 20)
+#define VE_MODE_REC_WR_MODE_1MB (0x00 << 20)
+#define VE_MODE_DDR_MODE_BW_128 (0x03 << 16)
+#define VE_MODE_DDR_MODE_BW_256 (0x02 << 16)
+#define VE_MODE_DISABLED (0x07 << 0)
+#define VE_MODE_DEC_H265 (0x04 << 0)
+#define VE_MODE_DEC_H264 (0x01 << 0)
+#define VE_MODE_DEC_MPEG (0x00 << 0)
+
+#define VE_PRIMARY_CHROMA_BUF_LEN 0xc4
+#define VE_PRIMARY_FB_LINE_STRIDE 0xc8
+
+#define VE_PRIMARY_FB_LINE_STRIDE_CHROMA(s) (((s) << 16) & GENMASK(31, 16))
+#define VE_PRIMARY_FB_LINE_STRIDE_LUMA(s) (((s) << 0) & GENMASK(15, 0))
+
+#define VE_CHROMA_BUF_LEN 0xe8
+
+#define VE_SECONDARY_OUT_FMT_TILED_32_NV12 (0x00 << 30)
+#define VE_SECONDARY_OUT_FMT_EXT (0x01 << 30)
+#define VE_SECONDARY_OUT_FMT_YU12 (0x02 << 30)
+#define VE_SECONDARY_OUT_FMT_YV12 (0x03 << 30)
+#define VE_CHROMA_BUF_LEN_SDRT(l) ((l) & GENMASK(27, 0))
+
+#define VE_PRIMARY_OUT_FMT 0xec
+
+#define VE_PRIMARY_OUT_FMT_TILED_32_NV12 (0x00 << 4)
+#define VE_PRIMARY_OUT_FMT_TILED_128_NV12 (0x01 << 4)
+#define VE_PRIMARY_OUT_FMT_YU12 (0x02 << 4)
+#define VE_PRIMARY_OUT_FMT_YV12 (0x03 << 4)
+#define VE_PRIMARY_OUT_FMT_NV12 (0x04 << 4)
+#define VE_PRIMARY_OUT_FMT_NV21 (0x05 << 4)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_32_NV12 (0x00 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_TILED_128_NV12 (0x01 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YU12 (0x02 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_YV12 (0x03 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV12 (0x04 << 0)
+#define VE_SECONDARY_OUT_FMT_EXT_NV21 (0x05 << 0)
+
+#define VE_VERSION 0xf0
+
+#define VE_VERSION_SHIFT 16
+
+#define VE_DEC_MPEG_MP12HDR (VE_ENGINE_DEC_MPEG + 0x00)
+
+#define VE_DEC_MPEG_MP12HDR_SLICE_TYPE(t) (((t) << 28) & GENMASK(30, 28))
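+/*
+ * f_code[x][y] values are packed as 4-bit fields starting at bit 24:
+ * f_code[0][0] at bits 27:24, f_code[0][1] at bits 23:20,
+ * f_code[1][0] at bits 19:16, f_code[1][1] at bits 15:12.
+ */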
+#define VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(x, y) (24 - 4 * (y) - 8 * (x))
+#define VE_DEC_MPEG_MP12HDR_F_CODE(__x, __y, __v) \
+ (((__v) & GENMASK(3, 0)) << VE_DEC_MPEG_MP12HDR_F_CODE_SHIFT(__x, __y))
+
+#define VE_DEC_MPEG_MP12HDR_INTRA_DC_PRECISION(p) \
+ (((p) << 10) & GENMASK(11, 10))
+#define VE_DEC_MPEG_MP12HDR_INTRA_PICTURE_STRUCTURE(s) \
+ (((s) << 8) & GENMASK(9, 8))
+#define VE_DEC_MPEG_MP12HDR_TOP_FIELD_FIRST(v) \
+ ((v) ? BIT(7) : 0)
+#define VE_DEC_MPEG_MP12HDR_FRAME_PRED_FRAME_DCT(v) \
+ ((v) ? BIT(6) : 0)
+#define VE_DEC_MPEG_MP12HDR_CONCEALMENT_MOTION_VECTORS(v) \
+ ((v) ? BIT(5) : 0)
+#define VE_DEC_MPEG_MP12HDR_Q_SCALE_TYPE(v) \
+ ((v) ? BIT(4) : 0)
+#define VE_DEC_MPEG_MP12HDR_INTRA_VLC_FORMAT(v) \
+ ((v) ? BIT(3) : 0)
+#define VE_DEC_MPEG_MP12HDR_ALTERNATE_SCAN(v) \
+ ((v) ? BIT(2) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_FORWARD_VECTOR(v) \
+ ((v) ? BIT(1) : 0)
+#define VE_DEC_MPEG_MP12HDR_FULL_PEL_BACKWARD_VECTOR(v) \
+ ((v) ? BIT(0) : 0)
+
+#define VE_DEC_MPEG_PICCODEDSIZE (VE_ENGINE_DEC_MPEG + 0x08)
+
+#define VE_DEC_MPEG_PICCODEDSIZE_WIDTH(w) \
+ ((DIV_ROUND_UP((w), 16) << 8) & GENMASK(15, 8))
+#define VE_DEC_MPEG_PICCODEDSIZE_HEIGHT(h) \
+ ((DIV_ROUND_UP((h), 16) << 0) & GENMASK(7, 0))
+
+#define VE_DEC_MPEG_PICBOUNDSIZE (VE_ENGINE_DEC_MPEG + 0x0c)
+
+#define VE_DEC_MPEG_PICBOUNDSIZE_WIDTH(w) (((w) << 16) & GENMASK(27, 16))
+#define VE_DEC_MPEG_PICBOUNDSIZE_HEIGHT(h) (((h) << 0) & GENMASK(11, 0))
+
+#define VE_DEC_MPEG_MBADDR (VE_ENGINE_DEC_MPEG + 0x10)
+
+#define VE_DEC_MPEG_MBADDR_X(w) (((w) << 8) & GENMASK(15, 8))
+#define VE_DEC_MPEG_MBADDR_Y(h)			(((h) << 0) & GENMASK(7, 0))
+
+#define VE_DEC_MPEG_CTRL (VE_ENGINE_DEC_MPEG + 0x14)
+
+#define VE_DEC_MPEG_CTRL_MC_CACHE_EN BIT(31)
+#define VE_DEC_MPEG_CTRL_SW_VLD BIT(27)
+#define VE_DEC_MPEG_CTRL_SW_IQ_IS BIT(17)
+#define VE_DEC_MPEG_CTRL_QP_AC_DC_OUT_EN BIT(14)
+#define VE_DEC_MPEG_CTRL_ROTATE_SCALE_OUT_EN BIT(8)
+#define VE_DEC_MPEG_CTRL_MC_NO_WRITEBACK BIT(7)
+#define VE_DEC_MPEG_CTRL_ROTATE_IRQ_EN BIT(6)
+#define VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN BIT(5)
+#define VE_DEC_MPEG_CTRL_ERROR_IRQ_EN BIT(4)
+#define VE_DEC_MPEG_CTRL_FINISH_IRQ_EN BIT(3)
+#define VE_DEC_MPEG_CTRL_IRQ_MASK \
+ (VE_DEC_MPEG_CTRL_FINISH_IRQ_EN | VE_DEC_MPEG_CTRL_ERROR_IRQ_EN | \
+ VE_DEC_MPEG_CTRL_VLD_DATA_REQ_IRQ_EN)
+
+#define VE_DEC_MPEG_TRIGGER (VE_ENGINE_DEC_MPEG + 0x18)
+
+#define VE_DEC_MPEG_TRIGGER_MB_BOUNDARY BIT(31)
+
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_420 (0x00 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_411 (0x01 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422 (0x02 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_444 (0x03 << 27)
+#define VE_DEC_MPEG_TRIGGER_CHROMA_FMT_422T (0x04 << 27)
+
+#define VE_DEC_MPEG_TRIGGER_MPEG1 (0x01 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG2 (0x02 << 24)
+#define VE_DEC_MPEG_TRIGGER_JPEG (0x03 << 24)
+#define VE_DEC_MPEG_TRIGGER_MPEG4 (0x04 << 24)
+#define VE_DEC_MPEG_TRIGGER_VP62 (0x05 << 24)
+
+#define VE_DEC_MPEG_TRIGGER_VP62_AC_GET_BITS BIT(7)
+
+#define VE_DEC_MPEG_TRIGGER_STCD_VC1 (0x02 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_MPEG2 (0x01 << 4)
+#define VE_DEC_MPEG_TRIGGER_STCD_AVC (0x00 << 4)
+
+#define VE_DEC_MPEG_TRIGGER_HW_MPEG_VLD (0x0f << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_JPEG_VLD (0x0e << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MB (0x0d << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_ROTATE (0x0c << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_VP6_VLD (0x0b << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_MAF (0x0a << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_END (0x09 << 0)
+#define VE_DEC_MPEG_TRIGGER_HW_STCD_BEGIN (0x08 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_MC (0x07 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IQ (0x06 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_IDCT (0x05 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_SCALE (0x04 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP6 (0x03 << 0)
+#define VE_DEC_MPEG_TRIGGER_SW_VP62_AC_GET_BITS (0x02 << 0)
+
+#define VE_DEC_MPEG_STATUS (VE_ENGINE_DEC_MPEG + 0x1c)
+
+#define VE_DEC_MPEG_STATUS_START_DETECT_BUSY BIT(27)
+#define VE_DEC_MPEG_STATUS_VP6_BIT BIT(26)
+#define VE_DEC_MPEG_STATUS_VP6_BIT_BUSY BIT(25)
+#define VE_DEC_MPEG_STATUS_MAF_BUSY BIT(23)
+#define VE_DEC_MPEG_STATUS_VP6_MVP_BUSY BIT(22)
+#define VE_DEC_MPEG_STATUS_JPEG_BIT_END BIT(21)
+#define VE_DEC_MPEG_STATUS_JPEG_RESTART_ERROR BIT(20)
+#define VE_DEC_MPEG_STATUS_JPEG_MARKER BIT(19)
+#define VE_DEC_MPEG_STATUS_ROTATE_BUSY BIT(18)
+#define VE_DEC_MPEG_STATUS_DEBLOCKING_BUSY BIT(17)
+#define VE_DEC_MPEG_STATUS_SCALE_DOWN_BUSY BIT(16)
+#define VE_DEC_MPEG_STATUS_IQIS_BUF_EMPTY BIT(15)
+#define VE_DEC_MPEG_STATUS_IDCT_BUF_EMPTY BIT(14)
+#define VE_DEC_MPEG_STATUS_VE_BUSY BIT(13)
+#define VE_DEC_MPEG_STATUS_MC_BUSY BIT(12)
+#define VE_DEC_MPEG_STATUS_IDCT_BUSY BIT(11)
+#define VE_DEC_MPEG_STATUS_IQIS_BUSY BIT(10)
+#define VE_DEC_MPEG_STATUS_DCAC_BUSY BIT(9)
+#define VE_DEC_MPEG_STATUS_VLD_BUSY BIT(8)
+#define VE_DEC_MPEG_STATUS_ROTATE_SUCCESS BIT(3)
+#define VE_DEC_MPEG_STATUS_VLD_DATA_REQ BIT(2)
+#define VE_DEC_MPEG_STATUS_ERROR BIT(1)
+#define VE_DEC_MPEG_STATUS_SUCCESS BIT(0)
+#define VE_DEC_MPEG_STATUS_CHECK_MASK \
+ (VE_DEC_MPEG_STATUS_SUCCESS | VE_DEC_MPEG_STATUS_ERROR | \
+ VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+#define VE_DEC_MPEG_STATUS_CHECK_ERROR \
+ (VE_DEC_MPEG_STATUS_ERROR | VE_DEC_MPEG_STATUS_VLD_DATA_REQ)
+
+#define VE_DEC_MPEG_VLD_ADDR (VE_ENGINE_DEC_MPEG + 0x28)
+
+#define VE_DEC_MPEG_VLD_ADDR_FIRST_PIC_DATA BIT(30)
+#define VE_DEC_MPEG_VLD_ADDR_LAST_PIC_DATA BIT(29)
+#define VE_DEC_MPEG_VLD_ADDR_VALID_PIC_DATA BIT(28)
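+/*
+ * The VLD base address keeps bits 27:4 of the bus address in place and
+ * folds bits 31:28 down into bits 3:0 of the register value.
+ */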
+#define VE_DEC_MPEG_VLD_ADDR_BASE(a) \
+ ({ \
+ u32 _tmp = (a); \
+ u32 _lo = _tmp & GENMASK(27, 4); \
+ u32 _hi = (_tmp >> 28) & GENMASK(3, 0); \
+ (_lo | _hi); \
+ })
+
+#define VE_DEC_MPEG_VLD_OFFSET (VE_ENGINE_DEC_MPEG + 0x2c)
+#define VE_DEC_MPEG_VLD_LEN (VE_ENGINE_DEC_MPEG + 0x30)
+#define VE_DEC_MPEG_VLD_END_ADDR (VE_ENGINE_DEC_MPEG + 0x34)
+
+#define VE_DEC_MPEG_REC_LUMA (VE_ENGINE_DEC_MPEG + 0x48)
+#define VE_DEC_MPEG_REC_CHROMA (VE_ENGINE_DEC_MPEG + 0x4c)
+#define VE_DEC_MPEG_FWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x50)
+#define VE_DEC_MPEG_FWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x54)
+#define VE_DEC_MPEG_BWD_REF_LUMA_ADDR (VE_ENGINE_DEC_MPEG + 0x58)
+#define VE_DEC_MPEG_BWD_REF_CHROMA_ADDR (VE_ENGINE_DEC_MPEG + 0x5c)
+
+#define VE_DEC_MPEG_IQMINPUT (VE_ENGINE_DEC_MPEG + 0x80)
+
+#define VE_DEC_MPEG_IQMINPUT_FLAG_INTRA (0x01 << 14)
+#define VE_DEC_MPEG_IQMINPUT_FLAG_NON_INTRA (0x00 << 14)
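+/*
+ * Each IQ matrix write packs the coefficient index (bits 13:8)
+ * together with its 8-bit weight (bits 7:0).
+ */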
+#define VE_DEC_MPEG_IQMINPUT_WEIGHT(i, v) \
+ (((v) & GENMASK(7, 0)) | (((i) << 8) & GENMASK(13, 8)))
+
+#define VE_DEC_MPEG_ERROR (VE_ENGINE_DEC_MPEG + 0xc4)
+#define VE_DEC_MPEG_CRTMBADDR (VE_ENGINE_DEC_MPEG + 0xc8)
+#define VE_DEC_MPEG_ROT_LUMA (VE_ENGINE_DEC_MPEG + 0xcc)
+#define VE_DEC_MPEG_ROT_CHROMA (VE_ENGINE_DEC_MPEG + 0xd0)
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
new file mode 100644
index 000000000000..5c5fce678b93
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "cedrus.h"
+#include "cedrus_video.h"
+#include "cedrus_dec.h"
+#include "cedrus_hw.h"
+
+#define CEDRUS_DECODE_SRC BIT(0)
+#define CEDRUS_DECODE_DST BIT(1)
+
+#define CEDRUS_MIN_WIDTH 16U
+#define CEDRUS_MIN_HEIGHT 16U
+#define CEDRUS_MAX_WIDTH 3840U
+#define CEDRUS_MAX_HEIGHT 2160U
+
+static struct cedrus_format cedrus_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_MPEG2_SLICE,
+ .directions = CEDRUS_DECODE_SRC,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
+ .directions = CEDRUS_DECODE_DST,
+ },
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .directions = CEDRUS_DECODE_DST,
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+ },
+};
+
+#define CEDRUS_FORMATS_COUNT ARRAY_SIZE(cedrus_formats)
+
+static inline struct cedrus_ctx *cedrus_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct cedrus_ctx, fh);
+}
+
+static struct cedrus_format *cedrus_find_format(u32 pixelformat, u32 directions,
+ unsigned int capabilities)
+{
+ struct cedrus_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ fmt = &cedrus_formats[i];
+
+ if (fmt->capabilities && (fmt->capabilities & capabilities) !=
+ fmt->capabilities)
+ continue;
+
+ if (fmt->pixelformat == pixelformat &&
+ (fmt->directions & directions) != 0)
+ break;
+ }
+
+ if (i == CEDRUS_FORMATS_COUNT)
+ return NULL;
+
+ return &cedrus_formats[i];
+}
+
+static bool cedrus_check_format(u32 pixelformat, u32 directions,
+ unsigned int capabilities)
+{
+ return cedrus_find_format(pixelformat, directions, capabilities);
+}
+
+static void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
+{
+ unsigned int width = pix_fmt->width;
+ unsigned int height = pix_fmt->height;
+ unsigned int sizeimage = pix_fmt->sizeimage;
+ unsigned int bytesperline = pix_fmt->bytesperline;
+
+ pix_fmt->field = V4L2_FIELD_NONE;
+
+ /* Limit to hardware min/max. */
+ width = clamp(width, CEDRUS_MIN_WIDTH, CEDRUS_MAX_WIDTH);
+ height = clamp(height, CEDRUS_MIN_HEIGHT, CEDRUS_MAX_HEIGHT);
+
+ switch (pix_fmt->pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ /* Zero bytes per line for encoded source. */
+ bytesperline = 0;
+
+ break;
+
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ /* 32-aligned stride. */
+ bytesperline = ALIGN(width, 32);
+
+ /* 32-aligned height. */
+ height = ALIGN(height, 32);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * height / 2;
+
+ break;
+
+ case V4L2_PIX_FMT_NV12:
+ /* 16-aligned stride. */
+ bytesperline = ALIGN(width, 16);
+
+ /* 16-aligned height. */
+ height = ALIGN(height, 16);
+
+ /* Luma plane size. */
+ sizeimage = bytesperline * height;
+
+ /* Chroma plane size. */
+ sizeimage += bytesperline * height / 2;
+
+ break;
+ }
+
+ pix_fmt->width = width;
+ pix_fmt->height = height;
+
+ pix_fmt->bytesperline = bytesperline;
+ pix_fmt->sizeimage = sizeimage;
+}
+
+static int cedrus_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, CEDRUS_NAME, sizeof(cap->driver));
+ strscpy(cap->card, CEDRUS_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", CEDRUS_NAME);
+
+ return 0;
+}
+
+static int cedrus_enum_fmt(struct file *file, struct v4l2_fmtdesc *f,
+ u32 direction)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ unsigned int capabilities = dev->capabilities;
+ struct cedrus_format *fmt;
+ unsigned int i, index;
+
+ /* Index among formats that match the requested direction. */
+ index = 0;
+
+ for (i = 0; i < CEDRUS_FORMATS_COUNT; i++) {
+ fmt = &cedrus_formats[i];
+
+ if (fmt->capabilities && (fmt->capabilities & capabilities) !=
+ fmt->capabilities)
+ continue;
+
+ if (!(cedrus_formats[i].directions & direction))
+ continue;
+
+ if (index == f->index)
+ break;
+
+ index++;
+ }
+
+ /* Matched format. */
+ if (i < CEDRUS_FORMATS_COUNT) {
+ f->pixelformat = cedrus_formats[i].pixelformat;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cedrus_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_DST);
+}
+
+static int cedrus_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return cedrus_enum_fmt(file, f, CEDRUS_DECODE_SRC);
+}
+
+static int cedrus_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+	/* Fall back to a dummy default while no format has been configured. */
+ if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ cedrus_prepare_format(&f->fmt.pix);
+
+ return 0;
+ }
+
+ f->fmt.pix = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int cedrus_g_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+
+	/* Fall back to a dummy default while no format has been configured. */
+ if (!ctx->dst_fmt.width || !ctx->dst_fmt.height) {
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
+ f->fmt.pix.sizeimage = SZ_1K;
+ cedrus_prepare_format(&f->fmt.pix);
+
+ return 0;
+ }
+
+ f->fmt.pix = ctx->src_fmt;
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_DST,
+ dev->capabilities))
+ return -EINVAL;
+
+ cedrus_prepare_format(pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt = &f->fmt.pix;
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, CEDRUS_DECODE_SRC,
+ dev->capabilities))
+ return -EINVAL;
+
+ /* Source image size has to be provided by userspace. */
+ if (pix_fmt->sizeimage == 0)
+ return -EINVAL;
+
+ cedrus_prepare_format(pix_fmt);
+
+ return 0;
+}
+
+static int cedrus_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ struct cedrus_dev *dev = ctx->dev;
+ int ret;
+
+ ret = cedrus_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->dst_fmt = f->fmt.pix;
+
+ cedrus_dst_format_set(dev, &ctx->dst_fmt);
+
+ return 0;
+}
+
+static int cedrus_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct cedrus_ctx *ctx = cedrus_file2ctx(file);
+ int ret;
+
+ ret = cedrus_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->src_fmt = f->fmt.pix;
+
+ /* Propagate colorspace information to capture. */
+ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace;
+ ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func;
+ ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->dst_fmt.quantization = f->fmt.pix.quantization;
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops cedrus_ioctl_ops = {
+ .vidioc_querycap = cedrus_querycap,
+
+ .vidioc_enum_fmt_vid_cap = cedrus_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = cedrus_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = cedrus_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = cedrus_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = cedrus_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = cedrus_g_fmt_vid_out,
+ .vidioc_try_fmt_vid_out = cedrus_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = cedrus_s_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int cedrus_queue_setup(struct vb2_queue *vq, unsigned int *nbufs,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+ struct v4l2_pix_format *pix_fmt;
+ u32 directions;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+ directions = CEDRUS_DECODE_SRC;
+ pix_fmt = &ctx->src_fmt;
+ } else {
+ directions = CEDRUS_DECODE_DST;
+ pix_fmt = &ctx->dst_fmt;
+ }
+
+ if (!cedrus_check_format(pix_fmt->pixelformat, directions,
+ dev->capabilities))
+ return -EINVAL;
+
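+	/*
+	 * A non-zero plane count comes from VIDIOC_CREATE_BUFS with
+	 * user-provided sizes to validate; otherwise report our own.
+	 */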
+ if (*nplanes) {
+ if (sizes[0] < pix_fmt->sizeimage)
+ return -EINVAL;
+ } else {
+ sizes[0] = pix_fmt->sizeimage;
+ *nplanes = 1;
+ }
+
+ return 0;
+}
+
+static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf;
+ unsigned long flags;
+
+ for (;;) {
+ spin_lock_irqsave(&ctx->dev->irq_lock, flags);
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
+
+ if (!vbuf)
+ return;
+
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->hdl);
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int cedrus_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_bufs[vb->index] = vb;
+
+ return 0;
+}
+
+static void cedrus_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+
+ if (!V4L2_TYPE_IS_OUTPUT(vq->type))
+ ctx->dst_bufs[vb->index] = NULL;
+}
+
+static int cedrus_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+
+ if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage)
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage);
+
+ return 0;
+}
+
+static int cedrus_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+ int ret = 0;
+
+ switch (ctx->src_fmt.pixelformat) {
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ ctx->current_codec = CEDRUS_CODEC_MPEG2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ dev->dec_ops[ctx->current_codec]->start)
+ ret = dev->dec_ops[ctx->current_codec]->start(ctx);
+
+ if (ret)
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void cedrus_stop_streaming(struct vb2_queue *vq)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
+ struct cedrus_dev *dev = ctx->dev;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) &&
+ dev->dec_ops[ctx->current_codec]->stop)
+ dev->dec_ops[ctx->current_codec]->stop(ctx);
+
+ cedrus_queue_cleanup(vq, VB2_BUF_STATE_ERROR);
+}
+
+static void cedrus_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void cedrus_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct cedrus_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->hdl);
+}
+
+static struct vb2_ops cedrus_qops = {
+ .queue_setup = cedrus_queue_setup,
+ .buf_prepare = cedrus_buf_prepare,
+ .buf_init = cedrus_buf_init,
+ .buf_cleanup = cedrus_buf_cleanup,
+ .buf_queue = cedrus_buf_queue,
+ .buf_request_complete = cedrus_buf_request_complete,
+ .start_streaming = cedrus_start_streaming,
+ .stop_streaming = cedrus_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct cedrus_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ src_vq->min_buffers_needed = 1;
+ src_vq->ops = &cedrus_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->dev = ctx->dev->dev;
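+	/* Per-frame controls are submitted through requests on this queue. */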
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->ops = &cedrus_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->dev = ctx->dev->dev;
+
+ return vb2_queue_init(dst_vq);
+}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.h b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
new file mode 100644
index 000000000000..0e4f7a8cccf2
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cedrus VPU driver
+ *
+ * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
+ * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ * Copyright (C) 2018 Bootlin
+ *
+ * Based on the vim2m driver, that is:
+ *
+ * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Pawel Osciak, <pawel@osciak.com>
+ * Marek Szyprowski, <m.szyprowski@samsung.com>
+ */
+
+#ifndef _CEDRUS_VIDEO_H_
+#define _CEDRUS_VIDEO_H_
+
+struct cedrus_format {
+ u32 pixelformat;
+ u32 directions;
+ unsigned int capabilities;
+};
+
+extern const struct v4l2_ioctl_ops cedrus_ioctl_ops;
+
+int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq);
+
+#endif
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 9e33d5206d54..f2497cb96abb 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -166,7 +166,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
unsigned long flags;
int ret;
- ret = vb2_qbuf(&queue->queue, buf);
+ ret = vb2_qbuf(&queue->queue, NULL, buf);
if (ret < 0)
return ret;