/*
 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/log2.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "vpdma.h"
#include "vpe_regs.h"
#include "sc.h"
#include "csc.h"
#define VPE_MODULE_NAME "vpe"

/* minimum and maximum frame sizes */

/* required alignments */
#define S_ALIGN		0	/* multiple of 1 */
#define H_ALIGN		1	/* multiple of 2 */

/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE	(1 << 0)
#define VPE_FMT_TYPE_OUTPUT	(1 << 1)

/* used as plane indices */
#define VPE_MAX_PLANES	2
#define VPE_LUMA	0
#define VPE_CHROMA	1
/* per m2m context info */
#define VPE_MAX_SRC_BUFS	3	/* need 3 src fields to de-interlace */

#define VPE_DEF_BUFS_PER_JOB	1	/* default one buffer per batch job */

/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
 * 3 output descriptors, and 10 control descriptors
 */
#define VPE_DESC_LIST_SIZE	(10 * VPDMA_DTD_DESC_SIZE +	\
					13 * VPDMA_CFD_CTD_DESC_SIZE)
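/*
 * In the list size above, the 10 data transfer descriptors cover the 7
 * input and 3 output descriptors, and the 13 config/control descriptors
 * cover the 3 config and 10 control descriptors mentioned in the comment.
 */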
#define vpe_dbg(vpedev, fmt, arg...)	\
		dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
#define vpe_err(vpedev, fmt, arg...)	\
		dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)
struct vpe_us_coeffs {
	unsigned short	anchor_fid0_c0;
	unsigned short	anchor_fid0_c1;
	unsigned short	anchor_fid0_c2;
	unsigned short	anchor_fid0_c3;
	unsigned short	interp_fid0_c0;
	unsigned short	interp_fid0_c1;
	unsigned short	interp_fid0_c2;
	unsigned short	interp_fid0_c3;
	unsigned short	anchor_fid1_c0;
	unsigned short	anchor_fid1_c1;
	unsigned short	anchor_fid1_c2;
	unsigned short	anchor_fid1_c3;
	unsigned short	interp_fid1_c0;
	unsigned short	interp_fid1_c1;
	unsigned short	interp_fid1_c2;
	unsigned short	interp_fid1_c3;
};
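/*
 * The two halves of the structure above hold the taps for field 0 and
 * field 1 respectively; within each half there are four 'anchor' and four
 * 'interp' coefficients, which set_us_coefficients() below writes two at
 * a time into the C0/C1 fields of successive upsampler registers.
 */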
/*
 * Default upsampler coefficients
 */
static const struct vpe_us_coeffs us_coeffs[] = {
	{
		/* Coefficients for progressive input */
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
		0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
	},
	{
		/* Coefficients for Top Field Interlaced input */
		0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
		/* Coefficients for Bottom Field Interlaced input */
		0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
	},
};
/*
 * The following registers configure some of the parameters of the motion
 * and edge detection blocks inside DEI. These generally remain the same,
 * but they could be exposed to userspace later if someone needs to tweak
 * them.
 */
struct vpe_dei_regs {
	unsigned long mdt_spacial_freq_thr_reg;		/* VPE_DEI_REG2 */
	unsigned long edi_config_reg;			/* VPE_DEI_REG3 */
	unsigned long edi_lut_reg0;			/* VPE_DEI_REG4 */
	unsigned long edi_lut_reg1;			/* VPE_DEI_REG5 */
	unsigned long edi_lut_reg2;			/* VPE_DEI_REG6 */
	unsigned long edi_lut_reg3;			/* VPE_DEI_REG7 */
};
/*
 * default expert DEI register values, unlikely to be modified.
 */
static const struct vpe_dei_regs dei_regs = {
/*
 * The port_data structure contains per-port data.
 */
struct vpe_port_data {
	enum vpdma_channel channel;	/* VPDMA channel */
	u8	vb_index;		/* input frame f, f-1, f-2 index */
	u8	vb_part;		/* plane index for co-planar formats */
};
/*
 * Define indices into the port_data tables
 */
#define VPE_PORT_LUMA1_IN	0
#define VPE_PORT_CHROMA1_IN	1
#define VPE_PORT_LUMA2_IN	2
#define VPE_PORT_CHROMA2_IN	3
#define VPE_PORT_LUMA3_IN	4
#define VPE_PORT_CHROMA3_IN	5
#define VPE_PORT_MV_IN		6
#define VPE_PORT_MV_OUT		7
#define VPE_PORT_LUMA_OUT	8
#define VPE_PORT_CHROMA_OUT	9
#define VPE_PORT_RGB_OUT	10
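/*
 * Input ports 1..3 feed the three source fields the de-interlacer needs
 * (frames f, f-1 and f-2, selected through vb_index in struct
 * vpe_port_data), while the MV ports carry motion vector data between
 * fields.
 */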
static const struct vpe_port_data port_data[11] = {
	[VPE_PORT_LUMA1_IN] = {
		.channel	= VPE_CHAN_LUMA1_IN,
	},
	[VPE_PORT_CHROMA1_IN] = {
		.channel	= VPE_CHAN_CHROMA1_IN,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA2_IN] = {
		.channel	= VPE_CHAN_LUMA2_IN,
	},
	[VPE_PORT_CHROMA2_IN] = {
		.channel	= VPE_CHAN_CHROMA2_IN,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_LUMA3_IN] = {
		.channel	= VPE_CHAN_LUMA3_IN,
	},
	[VPE_PORT_CHROMA3_IN] = {
		.channel	= VPE_CHAN_CHROMA3_IN,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_MV_IN] = {
		.channel	= VPE_CHAN_MV_IN,
	},
	[VPE_PORT_MV_OUT] = {
		.channel	= VPE_CHAN_MV_OUT,
	},
	[VPE_PORT_LUMA_OUT] = {
		.channel	= VPE_CHAN_LUMA_OUT,
	},
	[VPE_PORT_CHROMA_OUT] = {
		.channel	= VPE_CHAN_CHROMA_OUT,
		.vb_part	= VPE_CHROMA,
	},
	[VPE_PORT_RGB_OUT] = {
		.channel	= VPE_CHAN_RGB_OUT,
	},
};
/* driver info for each of the supported video formats */
struct vpe_fmt {
	char	*name;			/* human-readable name */
	u32	fourcc;			/* standard format identifier */
	u8	types;			/* CAPTURE and/or OUTPUT */
	u8	coplanar;		/* set for unpacked Luma and Chroma */
	/* vpdma format info for each plane */
	struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
};
static struct vpe_fmt vpe_formats[] = {
	{
		.name		= "YUV 422 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV16,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
				  },
	},
	{
		.name		= "YUV 420 co-planar",
		.fourcc		= V4L2_PIX_FMT_NV12,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
				    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
				  },
	},
	{
		.name		= "YUYV 422 packed",
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
				  },
	},
	{
		.name		= "UYVY 422 packed",
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.types		= VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
		.vpdma_fmt	= { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
				  },
	},
	{
		.name		= "RGB888 packed",
		.fourcc		= V4L2_PIX_FMT_RGB24,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB32,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
				  },
	},
	{
		.name		= "BGR888 packed",
		.fourcc		= V4L2_PIX_FMT_BGR24,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
				  },
	},
	{
		.fourcc		= V4L2_PIX_FMT_BGR32,
		.types		= VPE_FMT_TYPE_CAPTURE,
		.vpdma_fmt	= { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
				  },
	},
};
/*
 * per-queue, driver-specific private data.
 * there is one source queue and one destination queue for each m2m context.
 */
struct vpe_q_data {
	unsigned int		width;				/* frame width */
	unsigned int		height;				/* frame height */
	unsigned int		bytesperline[VPE_MAX_PLANES];	/* bytes per line in memory */
	enum v4l2_colorspace	colorspace;
	enum v4l2_field		field;				/* supported field value */
	unsigned int		flags;
	unsigned int		sizeimage[VPE_MAX_PLANES];	/* image size in memory */
	struct v4l2_rect	c_rect;				/* crop/compose rectangle */
	struct vpe_fmt		*fmt;				/* format info */
};
/* vpe_q_data flag bits */
#define	Q_DATA_FRAME_1D		(1 << 0)
#define	Q_DATA_MODE_TILED	(1 << 1)
#define	Q_DATA_INTERLACED	(1 << 2)

enum {
	Q_DATA_SRC = 0,
	Q_DATA_DST = 1,
};
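/*
 * Q_DATA_FRAME_1D and Q_DATA_MODE_TILED are translated into the
 * corresponding VPDMA_DATA_FRAME_1D and VPDMA_DATA_MODE_TILED descriptor
 * flags in add_in_dtd() and add_out_dtd() below.
 */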
/* find our format description corresponding to the passed v4l2_format */
static struct vpe_fmt *find_format(struct v4l2_format *f)
{
	struct vpe_fmt *fmt;
	unsigned int k;

	for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
		fmt = &vpe_formats[k];
		if (fmt->fourcc == f->fmt.pix.pixelformat)
			return fmt;
	}

	return NULL;
}
/*
 * there is one vpe_dev structure in the driver, it is shared by
 * all instances.
 */
struct vpe_dev {
	struct v4l2_device	v4l2_dev;
	struct video_device	vfd;
	struct v4l2_m2m_dev	*m2m_dev;

	atomic_t		num_instances;	/* count of driver instances */
	dma_addr_t		loaded_mmrs;	/* shadow mmrs in device */
	struct mutex		dev_mutex;
	spinlock_t		lock;

	void __iomem		*base;
	struct resource		*res;

	struct vb2_alloc_ctx	*alloc_ctx;
	struct vpdma_data	*vpdma;		/* vpdma data handle */
	struct sc_data		*sc;		/* scaler data handle */
	struct csc_data		*csc;		/* csc data handle */
};
/*
 * There is one vpe_ctx structure for each m2m context.
 */
struct vpe_ctx {
	struct v4l2_fh		fh;
	struct vpe_dev		*dev;
	struct v4l2_m2m_ctx	*m2m_ctx;
	struct v4l2_ctrl_handler hdl;

	unsigned int		field;			/* current field */
	unsigned int		sequence;		/* current frame/field seq */
	unsigned int		aborting;		/* abort after next irq */

	unsigned int		bufs_per_job;		/* input buffers per batch */
	unsigned int		bufs_completed;		/* bufs done in this batch */

	struct vpe_q_data	q_data[2];		/* src & dst queue data */
	struct vb2_buffer	*src_vbs[VPE_MAX_SRC_BUFS];
	struct vb2_buffer	*dst_vb;

	dma_addr_t		mv_buf_dma[2];		/* dma addrs of motion vector in/out bufs */
	void			*mv_buf[2];		/* virtual addrs of motion vector bufs */
	size_t			mv_buf_size;		/* current motion vector buffer size */
	struct vpdma_buf	mmr_adb;		/* shadow reg addr/data block */
	struct vpdma_buf	sc_coeff_h;		/* h coeff buffer */
	struct vpdma_buf	sc_coeff_v;		/* v coeff buffer */
	struct vpdma_desc_list	desc_list;		/* DMA descriptor list */

	bool			deinterlacing;		/* using de-interlacer */
	bool			load_mmrs;		/* have new shadow reg values */

	unsigned int		src_mv_buf_selector;
};
/*
 * M2M devices get 2 queues.
 * Return the queue given the type.
 */
static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
				     enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		return &ctx->q_data[Q_DATA_SRC];
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
		return &ctx->q_data[Q_DATA_DST];
	}
	return NULL;
}
static u32 read_reg(struct vpe_dev *dev, int offset)
{
	return ioread32(dev->base + offset);
}

static void write_reg(struct vpe_dev *dev, int offset, u32 value)
{
	iowrite32(value, dev->base + offset);
}

/* register field read/write helpers */
static int get_field(u32 value, u32 mask, int shift)
{
	return (value & (mask << shift)) >> shift;
}

static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
{
	return get_field(read_reg(dev, offset), mask, shift);
}

static void write_field(u32 *valp, u32 field, u32 mask, int shift)
{
	u32 val = *valp;

	val &= ~(mask << shift);
	val |= (field & mask) << shift;
	*valp = val;
}

static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(dev, offset);

	write_field(&val, field, mask, shift);

	write_reg(dev, offset, val);
}
/*
 * DMA address/data block for the shadow registers
 */
struct vpe_mmr_adb {
	struct vpdma_adb_hdr	out_fmt_hdr;
	u32			out_fmt_reg[1];
	u32			out_fmt_pad[3];
	struct vpdma_adb_hdr	us1_hdr;
	u32			us1_regs[8];
	struct vpdma_adb_hdr	us2_hdr;
	u32			us2_regs[8];
	struct vpdma_adb_hdr	us3_hdr;
	u32			us3_regs[8];
	struct vpdma_adb_hdr	dei_hdr;
	u32			dei_regs[8];
	struct vpdma_adb_hdr	sc_hdr0;
	u32			sc_regs0[7];
	u32			sc_pad0[1];
	struct vpdma_adb_hdr	sc_hdr8;
	u32			sc_regs8[6];
	u32			sc_pad8[2];
	struct vpdma_adb_hdr	sc_hdr17;
	u32			sc_regs17[9];
	u32			sc_pad17[3];
	struct vpdma_adb_hdr	csc_hdr;
	u32			csc_regs[6];
	u32			csc_pad[2];
};
#define GET_OFFSET_TOP(ctx, obj, reg)	\
	((obj)->res->start - ctx->dev->res->start + reg)

#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)	\
	VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
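/*
 * GET_OFFSET_TOP computes a sub-block (scaler/csc) register offset
 * relative to the vpe_top base, which is the only region the driver maps
 * (see the HACK note in vpe_probe() about resource handling).
 */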
/*
 * Set the headers for all of the address/data block structures.
 */
static void init_adb_hdrs(struct vpe_ctx *ctx)
{
	VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
	VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
	VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
	VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
	VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
		GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
	VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
		GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
}
/*
 * Allocate or re-allocate the motion vector DMA buffers
 * There are two buffers, one for input and one for output.
 * However, the roles are reversed after each field is processed.
 * In other words, after each field is processed, the previous
 * output (dst) MV buffer becomes the new input (src) MV buffer.
 */
static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
{
	struct device *dev = ctx->dev->v4l2_dev.dev;

	if (ctx->mv_buf_size == size)
		return 0;

	if (ctx->mv_buf[0])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);
	if (ctx->mv_buf[1])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
			ctx->mv_buf_dma[1]);

	if (size == 0)
		return 0;

	ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
				GFP_KERNEL);
	if (!ctx->mv_buf[0]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		return -ENOMEM;
	}

	ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
				GFP_KERNEL);
	if (!ctx->mv_buf[1]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		dma_free_coherent(dev, size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);
		return -ENOMEM;
	}

	ctx->mv_buf_size = size;
	ctx->src_mv_buf_selector = 0;

	return 0;
}
static void free_mv_buffers(struct vpe_ctx *ctx)
{
	realloc_mv_buffers(ctx, 0);
}
/*
 * While de-interlacing, we keep the two most recent input buffers
 * around. This function frees those two buffers when we have
 * finished processing the current stream.
 */
static void free_vbs(struct vpe_ctx *ctx)
{
	struct vpe_dev *dev = ctx->dev;
	unsigned long flags;

	if (ctx->src_vbs[2] == NULL)
		return;

	spin_lock_irqsave(&dev->lock, flags);
	if (ctx->src_vbs[2]) {
		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
		v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}
/*
 * Enable or disable the VPE clocks
 */
static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
{
	u32 val = 0;

	if (on)
		val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;

	write_reg(dev, VPE_CLK_ENABLE, val);
}
static void vpe_top_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);
}

static void vpe_top_vpdma_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);
}
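/*
 * Both reset helpers above pulse the respective reset bit: assert it,
 * wait 100-150 us for the block to settle, then de-assert it.
 */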
/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	u32 *us1_reg = &mmr_adb->us1_regs[0];
	u32 *us2_reg = &mmr_adb->us2_regs[0];
	u32 *us3_reg = &mmr_adb->us3_regs[0];
	const unsigned short *cp, *end_cp;

	cp = &us_coeffs[0].anchor_fid0_c0;

	if (s_q_data->flags & Q_DATA_INTERLACED)	/* interlaced */
		cp += sizeof(us_coeffs[0]) / sizeof(*cp);

	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);
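	/*
	 * Pack two coefficients per US1 register word, then mirror each
	 * word into US2 and US3 so that all three upsamplers run with
	 * identical coefficients.
	 */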
	while (cp < end_cp) {
		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
		*us2_reg++ = *us1_reg;
		*us3_reg++ = *us1_reg++;
	}

	ctx->load_mmrs = true;
}
/*
 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
 */
static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
{
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *us1_reg0 = &mmr_adb->us1_regs[0];
	u32 *us2_reg0 = &mmr_adb->us2_regs[0];
	u32 *us3_reg0 = &mmr_adb->us3_regs[0];
	int line_mode = 1;
	int cfg_mode = 1;

	/*
	 * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
	 * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
	 */

	if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
		cfg_mode = 0;
		line_mode = 0;		/* double lines to line buffer */
	}

	write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
	write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);

	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
	vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);

	/* frame start for input luma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_LUMA3_IN);

	/* frame start for input chroma */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA1_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA2_IN);
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_CHROMA3_IN);

	/* frame start for MV in client */
	vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
		VPE_CHAN_MV_IN);

	ctx->load_mmrs = true;
}
/*
 * Set the shadow registers that are modified when the source
 * format changes.
 */
static void set_src_registers(struct vpe_ctx *ctx)
{
	set_us_coefficients(ctx);
}
/*
 * Set the shadow registers that are modified when the destination
 * format changes.
 */
static void set_dst_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
	struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
	u32 val = 0;

	if (clrspc == V4L2_COLORSPACE_SRGB)
		val |= VPE_RGB_OUT_SELECT;
	else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
		val |= VPE_COLOR_SEPARATE_422;

	/*
	 * the source of CHR_DS and CSC is always the scaler, irrespective of
	 * whether it's used or not
	 */
	val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;

	if (fmt->fourcc != V4L2_PIX_FMT_NV12)
		val |= VPE_DS_BYPASS;

	mmr_adb->out_fmt_reg[0] = val;

	ctx->load_mmrs = true;
}
/*
 * Set the de-interlacer shadow register values
 */
static void set_dei_regs(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int src_w = s_q_data->c_rect.width;
	u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
	bool deinterlace = true;
	u32 val = 0;

	/*
	 * According to the TRM, we should set DEI in progressive bypass mode
	 * when the input content is progressive. However, DEI is bypassed
	 * correctly for both progressive and interlaced content in interlace
	 * bypass mode. It has been recommended not to use progressive bypass
	 * mode.
	 */
	if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
			!(s_q_data->flags & Q_DATA_INTERLACED)) {
		deinterlace = false;
		val = VPE_DEI_INTERLACE_BYPASS;
	}

	src_h = deinterlace ? src_h * 2 : src_h;

	val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
		(src_w << VPE_DEI_WIDTH_SHIFT) |
		VPE_DEI_FIELD_FLUSH;

	*dei_mmr0 = val;

	ctx->load_mmrs = true;
}
static void set_dei_shadow_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *dei_mmr = &mmr_adb->dei_regs[0];
	const struct vpe_dei_regs *cur = &dei_regs;

	dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
	dei_mmr[3]  = cur->edi_config_reg;
	dei_mmr[4]  = cur->edi_lut_reg0;
	dei_mmr[5]  = cur->edi_lut_reg1;
	dei_mmr[6]  = cur->edi_lut_reg2;
	dei_mmr[7]  = cur->edi_lut_reg3;

	ctx->load_mmrs = true;
}
/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;
	size_t mv_buf_size;
	int ret;

	ctx->sequence = 0;
	ctx->field = V4L2_FIELD_TOP;

	if ((s_q_data->flags & Q_DATA_INTERLACED) &&
			!(d_q_data->flags & Q_DATA_INTERLACED)) {
		int bytes_per_line;
		const struct vpdma_data_format *mv =
			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

		/*
		 * We make sure that the source image has a 16 byte aligned
		 * stride; we need to do the same for the motion vector buffer
		 * by aligning its stride to the next 16 byte boundary. This
		 * extra space will not be used by the de-interlacer, but will
		 * ensure that vpdma operates correctly.
		 */
		bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
				VPDMA_STRIDE_ALIGN);
		mv_buf_size = bytes_per_line * s_q_data->height;

		ctx->deinterlacing = 1;
		src_h <<= 1;
	} else {
		ctx->deinterlacing = 0;
		mv_buf_size = 0;
	}

	free_vbs(ctx);

	ret = realloc_mv_buffers(ctx, mv_buf_size);
	if (ret)
		return ret;

	set_cfg_and_line_modes(ctx);
	set_dei_regs(ctx);

	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
		s_q_data->colorspace, d_q_data->colorspace);

	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);

	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
		src_w, src_h, dst_w, dst_h);

	return 0;
}
/*
 * Return the vpe_ctx structure for a given struct file
 */
static struct vpe_ctx *file2ctx(struct file *file)
{
	return container_of(file->private_data, struct vpe_ctx, fh);
}
/**
 * job_ready() - check whether an instance is ready to be scheduled to run
 */
static int job_ready(void *priv)
{
	struct vpe_ctx *ctx = priv;
	int needed = ctx->bufs_per_job;

	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
		needed += 2;	/* need additional two most recent fields */

	if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
		return 0;

	if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
		return 0;

	return 1;
}
static void job_abort(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/* Will cancel the transaction in the next interrupt handler */
	ctx->aborting = 1;
}
/*
 * Lock access to the device
 */
static void vpe_lock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;

	mutex_lock(&dev->dev_mutex);
}

static void vpe_unlock(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct vpe_dev *dev = ctx->dev;

	mutex_unlock(&dev->dev_mutex);
}
static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

	vpe_dbg(dev, "VPE Registers:\n");

	DUMPREG(INT0_STATUS0_RAW);
	DUMPREG(INT0_STATUS0);
	DUMPREG(INT0_ENABLE0);
	DUMPREG(INT0_STATUS1_RAW);
	DUMPREG(INT0_STATUS1);
	DUMPREG(INT0_ENABLE1);
	DUMPREG(CLK_FORMAT_SELECT);
	DUMPREG(CLK_RANGE_MAP);
	DUMPREG(DEI_FRAME_SIZE);
	DUMPREG(MDT_SF_THRESHOLD);
	DUMPREG(DEI_EDI_LUT_R0);
	DUMPREG(DEI_EDI_LUT_R1);
	DUMPREG(DEI_EDI_LUT_R2);
	DUMPREG(DEI_EDI_LUT_R3);
	DUMPREG(DEI_FMD_WINDOW_R0);
	DUMPREG(DEI_FMD_WINDOW_R1);
	DUMPREG(DEI_FMD_CONTROL_R0);
	DUMPREG(DEI_FMD_CONTROL_R1);
	DUMPREG(DEI_FMD_STATUS_R0);
	DUMPREG(DEI_FMD_STATUS_R1);
	DUMPREG(DEI_FMD_STATUS_R2);
#undef DUMPREG

	sc_dump_regs(dev->sc);
	csc_dump_regs(dev->csc);
}
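/*
 * Add an output data transfer descriptor for the given port to the
 * context's descriptor list; the motion vector output port uses the
 * dedicated MV buffer instead of a vb2 plane.
 */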
static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->dst_vb;
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = !ctx->src_mv_buf_selector;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_OUT) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];
		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring output buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_out_dtd(&ctx->desc_list, c_rect, vpdma_fmt, dma_addr,
		p_data->channel, flags);
}
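/*
 * Add an input data transfer descriptor for the given port; the source
 * buffer is picked from src_vbs[] using the port's vb_index, so the
 * de-interlacer input ports see fields f, f-1 and f-2.
 */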
static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
	struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
	const struct vpe_port_data *p_data = &port_data[port];
	struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index];
	struct v4l2_rect *c_rect = &q_data->c_rect;
	struct vpe_fmt *fmt = q_data->fmt;
	const struct vpdma_data_format *vpdma_fmt;
	int mv_buf_selector = ctx->src_mv_buf_selector;
	int field = vb->v4l2_buf.field == V4L2_FIELD_BOTTOM;
	dma_addr_t dma_addr;
	u32 flags = 0;

	if (port == VPE_PORT_MV_IN) {
		vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
		dma_addr = ctx->mv_buf_dma[mv_buf_selector];
	} else {
		/* to incorporate interleaved formats */
		int plane = fmt->coplanar ? p_data->vb_part : 0;

		vpdma_fmt = fmt->vpdma_fmt[plane];

		dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
		if (!dma_addr) {
			vpe_err(ctx->dev,
				"acquiring input buffer(%d) dma_addr failed\n",
				port);
			return;
		}
	}

	if (q_data->flags & Q_DATA_FRAME_1D)
		flags |= VPDMA_DATA_FRAME_1D;
	if (q_data->flags & Q_DATA_MODE_TILED)
		flags |= VPDMA_DATA_MODE_TILED;

	vpdma_add_in_dtd(&ctx->desc_list, q_data->width, q_data->height,
		c_rect, vpdma_fmt, dma_addr, p_data->channel, field, flags);
}
/*
 * Enable the expected IRQ sources
 */
static void enable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
				VPE_DS1_UV_ERROR_INT);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
}

static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
}
/*
 * device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 */
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct sc_data *sc = ctx->dev->sc;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];

	if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
		ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[2] == NULL);
		ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		WARN_ON(ctx->src_vbs[1] == NULL);
	}

	ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->src_vbs[0] == NULL);
	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	/* config descriptors */
	if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
		vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
		ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
		ctx->load_mmrs = false;
	}

	if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
			sc->load_coeff_h) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_h, 0);

		sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
		sc->load_coeff_h = false;
	}

	if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
			sc->load_coeff_v) {
		vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
		vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
			&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);

		sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
		sc->load_coeff_v = false;
	}

	/* output data descriptors */
	if (ctx->deinterlacing)
		add_out_dtd(ctx, VPE_PORT_MV_OUT);

	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
		add_out_dtd(ctx, VPE_PORT_RGB_OUT);
	} else {
		add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
		if (d_q_data->fmt->coplanar)
			add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
	}

	/* input data descriptors */
	if (ctx->deinterlacing) {
		add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

		add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
		add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
	}

	add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
	add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

	if (ctx->deinterlacing)
		add_in_dtd(ctx, VPE_PORT_MV_IN);

	/* sync on channel control descriptors for input ports */
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
	vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

	if (ctx->deinterlacing) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA2_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA2_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA3_IN);
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_CHROMA3_IN);

		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
	}

	/* sync on channel control descriptors for output ports */
	if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_RGB_OUT);
	} else {
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
			VPE_CHAN_LUMA_OUT);
		if (d_q_data->fmt->coplanar)
			vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
				VPE_CHAN_CHROMA_OUT);
	}

	if (ctx->deinterlacing)
		vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

	enable_irqs(ctx);

	vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
	vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
}
static void dei_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received DEI error interrupt\n");
}

static void ds1_uv_error(struct vpe_ctx *ctx)
{
	dev_warn(ctx->dev->v4l2_dev.dev,
		"received downsampler error interrupt\n");
}
static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
	struct vpe_dev *dev = (struct vpe_dev *)data;
	struct vpe_ctx *ctx;
	struct vpe_q_data *d_q_data;
	struct vb2_buffer *s_vb, *d_vb;
	struct v4l2_buffer *s_buf, *d_buf;
	unsigned long flags;
	u32 irqst0, irqst1;

	irqst0 = read_reg(dev, VPE_INT0_STATUS0);
	if (irqst0) {
		write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
		vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
	}

	irqst1 = read_reg(dev, VPE_INT0_STATUS1);
	if (irqst1) {
		write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
		vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
	}

	ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
	if (!ctx) {
		vpe_err(dev, "instance released before end of transaction\n");
		goto handled;
	}

	if (irqst1) {
		if (irqst1 & VPE_DEI_ERROR_INT) {
			irqst1 &= ~VPE_DEI_ERROR_INT;
			dei_error(ctx);
		}
		if (irqst1 & VPE_DS1_UV_ERROR_INT) {
			irqst1 &= ~VPE_DS1_UV_ERROR_INT;
			ds1_uv_error(ctx);
		}
	}

	if (irqst0) {
		if (irqst0 & VPE_INT0_LIST0_COMPLETE)
			vpdma_clear_list_stat(ctx->dev->vpdma);

		irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
	}

	if (irqst0 | irqst1) {
		dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
			irqst0, irqst1);
	}

	disable_irqs(ctx);

	vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
	vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

	vpdma_reset_desc_list(&ctx->desc_list);

	/* the previous dst mv buffer becomes the next src mv buffer */
	ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

	if (ctx->aborting)
		goto finished;

	s_vb = ctx->src_vbs[0];
	d_vb = ctx->dst_vb;
	s_buf = &s_vb->v4l2_buf;
	d_buf = &d_vb->v4l2_buf;

	d_buf->flags = s_buf->flags;

	d_buf->timestamp = s_buf->timestamp;
	if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
		d_buf->timecode = s_buf->timecode;

	d_buf->sequence = ctx->sequence;

	d_q_data = &ctx->q_data[Q_DATA_DST];
	if (d_q_data->flags & Q_DATA_INTERLACED) {
		d_buf->field = ctx->field;
		if (ctx->field == V4L2_FIELD_BOTTOM) {
			ctx->sequence++;
			ctx->field = V4L2_FIELD_TOP;
		} else {
			WARN_ON(ctx->field != V4L2_FIELD_TOP);
			ctx->field = V4L2_FIELD_BOTTOM;
		}
	} else {
		d_buf->field = V4L2_FIELD_NONE;
		ctx->sequence++;
	}

	if (ctx->deinterlacing)
		s_vb = ctx->src_vbs[2];

	spin_lock_irqsave(&dev->lock, flags);
	v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
	spin_unlock_irqrestore(&dev->lock, flags);

	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
handled:
	return IRQ_HANDLED;
}
static int vpe_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
	strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		VPE_MODULE_NAME);
	cap->device_caps  = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	int i, index;
	struct vpe_fmt *fmt = NULL;

	index = 0;
	for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
		if (vpe_formats[i].types & type) {
			if (index == f->index) {
				fmt = &vpe_formats[i];
				break;
			}
			index++;
		}
	}

	if (!fmt)
		return -EINVAL;

	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
	f->pixelformat = fmt->fourcc;
	return 0;
}

static int vpe_enum_fmt(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	if (V4L2_TYPE_IS_OUTPUT(f->type))
		return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);

	return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
}
static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct vpe_ctx *ctx = file2ctx(file);
	struct vb2_queue *vq;
	struct vpe_q_data *q_data;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	q_data = get_q_data(ctx, f->type);

	pix->width = q_data->width;
	pix->height = q_data->height;
	pix->pixelformat = q_data->fmt->fourcc;
	pix->field = q_data->field;

	if (V4L2_TYPE_IS_OUTPUT(f->type)) {
		pix->colorspace = q_data->colorspace;
	} else {
		struct vpe_q_data *s_q_data;

		/* get colorspace from the source queue */
		s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		pix->colorspace = s_q_data->colorspace;
	}

	pix->num_planes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < pix->num_planes; i++) {
		pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
		pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
	}

	return 0;
}
static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
		       struct vpe_fmt *fmt, int type)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	unsigned int w_align;
	int i, depth, depth_bytes;

	if (!fmt || !(fmt->types & type)) {
		vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
			pix->pixelformat);
		return -EINVAL;
	}

	if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
		pix->field = V4L2_FIELD_NONE;

	depth = fmt->vpdma_fmt[VPE_LUMA]->depth;

	/*
	 * the line stride should be 16 byte aligned for VPDMA to work, based
	 * on the bytes per pixel, figure out how much the width should be
	 * aligned to make sure line stride is 16 byte aligned
	 */
	depth_bytes = depth >> 3;

	if (depth_bytes == 3)
		/*
		 * if bpp is 3 (as in some RGB formats), the pixel width
		 * doesn't really help in ensuring line stride is 16 byte
		 * aligned
		 */
		w_align = 4;
	else
		/*
		 * for the remaining bpps (4, 2 and 1), the pixel width
		 * alignment can ensure a line stride alignment of 16 bytes.
		 * For example, if bpp is 2, then the line stride is 16 byte
		 * aligned if the width is aligned to 8 pixels
		 */
		w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
			      &pix->height, MIN_H, MAX_H, H_ALIGN,
			      S_ALIGN);

	pix->num_planes = fmt->coplanar ? 2 : 1;
	pix->pixelformat = fmt->fourcc;

	if (!pix->colorspace) {
		if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
				fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
				fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
				fmt->fourcc == V4L2_PIX_FMT_BGR32) {
			pix->colorspace = V4L2_COLORSPACE_SRGB;
		} else {
			if (pix->height > 1280)	/* HD */
				pix->colorspace = V4L2_COLORSPACE_REC709;
			else
				pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
		}
	}

	memset(pix->reserved, 0, sizeof(pix->reserved));
	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];
		depth = fmt->vpdma_fmt[i]->depth;

		if (i == VPE_LUMA)
			plane_fmt->bytesperline = (pix->width * depth) >> 3;
		else
			plane_fmt->bytesperline = pix->width;

		plane_fmt->sizeimage =
				(pix->height * pix->width * depth) >> 3;

		memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
	}

	return 0;
}
static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_fmt *fmt = find_format(f);

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);

	return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
}
static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *plane_fmt;
	struct vpe_q_data *q_data;
	struct vb2_queue *vq;
	int i;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		vpe_err(ctx->dev, "queue busy\n");
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);
	if (!q_data)
		return -EINVAL;

	q_data->fmt		= find_format(f);
	q_data->width		= pix->width;
	q_data->height		= pix->height;
	q_data->colorspace	= pix->colorspace;
	q_data->field		= pix->field;

	for (i = 0; i < pix->num_planes; i++) {
		plane_fmt = &pix->plane_fmt[i];

		q_data->bytesperline[i]	= plane_fmt->bytesperline;
		q_data->sizeimage[i]	= plane_fmt->sizeimage;
	}

	q_data->c_rect.left	= 0;
	q_data->c_rect.top	= 0;
	q_data->c_rect.width	= q_data->width;
	q_data->c_rect.height	= q_data->height;

	if (q_data->field == V4L2_FIELD_ALTERNATE)
		q_data->flags |= Q_DATA_INTERLACED;
	else
		q_data->flags &= ~Q_DATA_INTERLACED;

	vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d",
		f->type, q_data->width, q_data->height, q_data->fmt->fourcc,
		q_data->bytesperline[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " bpl_uv %d\n",
			q_data->bytesperline[VPE_CHROMA]);

	return 0;
}
static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	int ret;
	struct vpe_ctx *ctx = file2ctx(file);

	ret = vpe_try_fmt(file, priv, f);
	if (ret)
		return ret;

	ret = __vpe_s_fmt(ctx, f);
	if (ret)
		return ret;

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		set_src_registers(ctx);
	else
		set_dst_registers(ctx);

	return set_srcdst_params(ctx);
}
static int vpe_reqbufs(struct file *file, void *priv,
		       struct v4l2_requestbuffers *reqbufs)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
{
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
/*
 * defines number of buffers/frames a context can process with VPE before
 * switching to a different context. default value is 1 buffer per context
 */
#define V4L2_CID_VPE_BUFS_PER_JOB		(V4L2_CID_USER_TI_VPE_BASE + 0)
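/*
 * A minimal userspace sketch (assuming a /dev/videoN node for this
 * device): since the control is exposed through the standard control
 * framework, it can be set with VIDIOC_S_CTRL before streaming, e.g.:
 *
 *	struct v4l2_control ctrl = {
 *		.id	= V4L2_CID_VPE_BUFS_PER_JOB,
 *		.value	= 4,
 *	};
 *	ioctl(fd, VIDIOC_S_CTRL, &ctrl);
 */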
static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vpe_ctx *ctx =
		container_of(ctrl->handler, struct vpe_ctx, hdl);

	switch (ctrl->id) {
	case V4L2_CID_VPE_BUFS_PER_JOB:
		ctx->bufs_per_job = ctrl->val;
		break;

	default:
		vpe_err(ctx->dev, "Invalid control\n");
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
	.s_ctrl = vpe_s_ctrl,
};
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
	.vidioc_querycap	= vpe_querycap,

	.vidioc_enum_fmt_vid_cap_mplane	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_cap_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_cap_mplane	= vpe_s_fmt,

	.vidioc_enum_fmt_vid_out_mplane	= vpe_enum_fmt,
	.vidioc_g_fmt_vid_out_mplane	= vpe_g_fmt,
	.vidioc_try_fmt_vid_out_mplane	= vpe_try_fmt,
	.vidioc_s_fmt_vid_out_mplane	= vpe_s_fmt,

	.vidioc_reqbufs		= vpe_reqbufs,
	.vidioc_querybuf	= vpe_querybuf,

	.vidioc_qbuf		= vpe_qbuf,
	.vidioc_dqbuf		= vpe_dqbuf,

	.vidioc_streamon	= vpe_streamon,
	.vidioc_streamoff	= vpe_streamoff,

	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
static int vpe_queue_setup(struct vb2_queue *vq,
			   const struct v4l2_format *fmt,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	int i;
	struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
	struct vpe_q_data *q_data;

	q_data = get_q_data(ctx, vq->type);

	*nplanes = q_data->fmt->coplanar ? 2 : 1;

	for (i = 0; i < *nplanes; i++) {
		sizes[i] = q_data->sizeimage[i];
		alloc_ctxs[i] = ctx->dev->alloc_ctx;
	}

	vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers,
		sizes[VPE_LUMA]);
	if (q_data->fmt->coplanar)
		vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]);

	return 0;
}
static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	int i, num_planes;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	num_planes = q_data->fmt->coplanar ? 2 : 1;

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		if (!(q_data->flags & Q_DATA_INTERLACED)) {
			vb->v4l2_buf.field = V4L2_FIELD_NONE;
		} else {
			if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
					vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
				return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(long) q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	for (i = 0; i < num_planes; i++)
		vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);

	return 0;
}
static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}

static void vpe_wait_prepare(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_unlock(ctx);
}

static void vpe_wait_finish(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_lock(ctx);
}

static struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 = vpe_wait_prepare,
	.wait_finish	 = vpe_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct vpe_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &vpe_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &vpe_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;

	return vb2_queue_init(dst_vq);
}
static const struct v4l2_ctrl_config vpe_bufs_per_job = {
	.ops = &vpe_ctrl_ops,
	.id = V4L2_CID_VPE_BUFS_PER_JOB,
	.name = "Buffers Per Transaction",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = VPE_DEF_BUFS_PER_JOB,
	.min = 1,
	.max = VIDEO_MAX_FRAME,
	.step = 1,
};
static int vpe_open(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = NULL;
	struct vpe_q_data *s_q_data;
	struct v4l2_ctrl_handler *hdl;
	int ret;

	vpe_dbg(dev, "vpe_open\n");

	if (!dev->vpdma->ready) {
		vpe_err(dev, "vpdma firmware not loaded\n");
		return -ENODEV;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	if (mutex_lock_interruptible(&dev->dev_mutex)) {
		ret = -ERESTARTSYS;
		goto free_ctx;
	}

	ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
			VPDMA_LIST_TYPE_NORMAL);
	if (ret != 0)
		goto unlock;

	ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
	if (ret != 0)
		goto free_desc_list;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_mmr_adb;

	ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
	if (ret != 0)
		goto free_sc_h;

	init_adb_hdrs(ctx);

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;

	hdl = &ctx->hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
	if (hdl->error) {
		ret = hdl->error;
		goto exit_fh;
	}
	ctx->fh.ctrl_handler = hdl;
	v4l2_ctrl_handler_setup(hdl);
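	/*
	 * Default both queues to 1080p using the third entry of vpe_formats
	 * (packed YUYV); the destination queue is copied from the source
	 * queue below, so the two start out identical.
	 */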
	s_q_data = &ctx->q_data[Q_DATA_SRC];
	s_q_data->fmt = &vpe_formats[2];
	s_q_data->width = 1920;
	s_q_data->height = 1080;
	s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
			s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
	s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
			s_q_data->height);
	s_q_data->colorspace = V4L2_COLORSPACE_REC709;
	s_q_data->field = V4L2_FIELD_NONE;
	s_q_data->c_rect.left = 0;
	s_q_data->c_rect.top = 0;
	s_q_data->c_rect.width = s_q_data->width;
	s_q_data->c_rect.height = s_q_data->height;
	s_q_data->flags = 0;

	ctx->q_data[Q_DATA_DST] = *s_q_data;

	set_dei_shadow_registers(ctx);
	set_src_registers(ctx);
	set_dst_registers(ctx);
	ret = set_srcdst_params(ctx);
	if (ret)
		goto exit_fh;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);

	if (IS_ERR(ctx->m2m_ctx)) {
		ret = PTR_ERR(ctx->m2m_ctx);
		goto exit_fh;
	}

	v4l2_fh_add(&ctx->fh);

	/*
	 * for now, just report the creation of the first instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_inc_return(&dev->num_instances) == 1)
		vpe_dbg(dev, "first instance created\n");

	ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;

	ctx->load_mmrs = true;

	vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
		ctx, ctx->m2m_ctx);

	mutex_unlock(&dev->dev_mutex);

	return 0;
exit_fh:
	v4l2_ctrl_handler_free(hdl);
	v4l2_fh_exit(&ctx->fh);
	vpdma_free_desc_buf(&ctx->sc_coeff_v);
free_sc_h:
	vpdma_free_desc_buf(&ctx->sc_coeff_h);
free_mmr_adb:
	vpdma_free_desc_buf(&ctx->mmr_adb);
free_desc_list:
	vpdma_free_desc_list(&ctx->desc_list);
unlock:
	mutex_unlock(&dev->dev_mutex);
free_ctx:
	kfree(ctx);
	return ret;
}
static int vpe_release(struct file *file)
{
	struct vpe_dev *dev = video_drvdata(file);
	struct vpe_ctx *ctx = file2ctx(file);

	vpe_dbg(dev, "releasing instance %p\n", ctx);

	mutex_lock(&dev->dev_mutex);
	free_vbs(ctx);
	free_mv_buffers(ctx);
	vpdma_free_desc_list(&ctx->desc_list);
	vpdma_free_desc_buf(&ctx->mmr_adb);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	v4l2_ctrl_handler_free(&ctx->hdl);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);

	kfree(ctx);

	/*
	 * for now, just report the release of the last instance, we can later
	 * optimize the driver to enable or disable clocks when the first
	 * instance is created or the last instance released
	 */
	if (atomic_dec_return(&dev->num_instances) == 0)
		vpe_dbg(dev, "last instance released\n");

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
static unsigned int vpe_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_dev *dev = ctx->dev;
	int ret;

	mutex_lock(&dev->dev_mutex);
	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vpe_ctx *ctx = file2ctx(file);
	struct vpe_dev *dev = ctx->dev;
	int ret;

	if (mutex_lock_interruptible(&dev->dev_mutex))
		return -ERESTARTSYS;
	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
static const struct v4l2_file_operations vpe_fops = {
	.owner		= THIS_MODULE,
	.open		= vpe_open,
	.release	= vpe_release,
	.poll		= vpe_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vpe_mmap,
};

static struct video_device vpe_videodev = {
	.name		= VPE_MODULE_NAME,
	.fops		= &vpe_fops,
	.ioctl_ops	= &vpe_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
};
static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_ready	= job_ready,
	.job_abort	= job_abort,
	.lock		= vpe_lock,
	.unlock		= vpe_unlock,
};
static int vpe_runtime_get(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_get\n");

	r = pm_runtime_get_sync(&pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

static void vpe_runtime_put(struct platform_device *pdev)
{
	int r;

	dev_dbg(&pdev->dev, "vpe_runtime_put\n");

	r = pm_runtime_put_sync(&pdev->dev);
	WARN_ON(r < 0 && r != -ENOSYS);
}
static int vpe_probe(struct platform_device *pdev)
{
	struct vpe_dev *dev;
	struct video_device *vfd;
	int ret, irq, func;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock);

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		return ret;

	atomic_set(&dev->num_instances, 0);
	mutex_init(&dev->dev_mutex);

	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			"vpe_top");
	/*
	 * HACK: we get resource info from device tree in the form of a list of
	 * VPE sub blocks, the driver currently uses only the base of vpe_top
	 * for register access, the driver should be changed later to access
	 * registers based on the sub block base addresses
	 */
	dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
	if (!dev->base) {
		ret = -ENOMEM;
		goto v4l2_dev_unreg;
	}

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
			dev);
	if (ret)
		goto v4l2_dev_unreg;

	platform_set_drvdata(pdev, dev);

	dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(dev->alloc_ctx)) {
		vpe_err(dev, "Failed to alloc vb2 context\n");
		ret = PTR_ERR(dev->alloc_ctx);
		goto v4l2_dev_unreg;
	}

	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		vpe_err(dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto rel_ctx;
	}

	pm_runtime_enable(&pdev->dev);

	ret = vpe_runtime_get(pdev);
	if (ret)
		goto rel_m2m;

	/* Perform clk enable followed by reset */
	vpe_set_clock_enable(dev, 1);

	vpe_top_reset(dev);

	func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
		VPE_PID_FUNC_SHIFT);
	vpe_dbg(dev, "VPE PID function %x\n", func);

	vpe_top_vpdma_reset(dev);

	dev->sc = sc_create(pdev);
	if (IS_ERR(dev->sc)) {
		ret = PTR_ERR(dev->sc);
		goto runtime_put;
	}

	dev->csc = csc_create(pdev);
	if (IS_ERR(dev->csc)) {
		ret = PTR_ERR(dev->csc);
		goto runtime_put;
	}

	dev->vpdma = vpdma_create(pdev);
	if (IS_ERR(dev->vpdma)) {
		ret = PTR_ERR(dev->vpdma);
		goto runtime_put;
	}

	vfd = &dev->vfd;
	*vfd = vpe_videodev;
	vfd->lock = &dev->dev_mutex;
	vfd->v4l2_dev = &dev->v4l2_dev;

	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		vpe_err(dev, "Failed to register video device\n");
		goto runtime_put;
	}

	video_set_drvdata(vfd, dev);
	snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
	dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
		vfd->num);

	return 0;

runtime_put:
	vpe_runtime_put(pdev);
rel_m2m:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(dev->m2m_dev);
rel_ctx:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
v4l2_dev_unreg:
	v4l2_device_unregister(&dev->v4l2_dev);

	return ret;
}
static int vpe_remove(struct platform_device *pdev)
{
	struct vpe_dev *dev =
		(struct vpe_dev *) platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME);

	v4l2_m2m_release(dev->m2m_dev);
	video_unregister_device(&dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);

	vpe_set_clock_enable(dev, 0);
	vpe_runtime_put(pdev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id vpe_of_match[] = {
	{
		.compatible = "ti,vpe",
	},
	{},
};
#else
#define vpe_of_match NULL
#endif
static struct platform_driver vpe_pdrv = {
	.probe		= vpe_probe,
	.remove		= vpe_remove,
	.driver		= {
		.name	= VPE_MODULE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= vpe_of_match,
	},
};

module_platform_driver(vpe_pdrv);
MODULE_DESCRIPTION("TI VPE driver");
MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
MODULE_LICENSE("GPL");