// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cx231xx_vbi.c - driver for Conexant Cx23100/101/102 USB video capture devices
 *
 * Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
 *	Based on cx88 driver
 */

#include "cx231xx.h"
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/i2c.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/drv-intf/msp3400.h>
#include <media/tuner.h>

#include "cx231xx-vbi.h"

static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
	char *errmsg = "Unknown";

	switch (status) {
	case -ENOENT:
		errmsg = "unlinked synchronously";
		break;
	case -ECONNRESET:
		errmsg = "unlinked asynchronously";
		break;
	case -ENOSR:
		errmsg = "Buffer error (overrun)";
		break;
	case -EPIPE:
		errmsg = "Stalled (device not responding)";
		break;
	case -EOVERFLOW:
		errmsg = "Babble (bad cable?)";
		break;
	case -EPROTO:
		errmsg = "Bit-stuff error (bad cable?)";
		break;
	case -EILSEQ:
		errmsg = "CRC/Timeout (could be anything)";
		break;
	case -ETIME:
		errmsg = "Device does not respond";
		break;
	}
	if (packet < 0) {
		dev_err(dev->dev,
			"URB status %d [%s].\n", status, errmsg);
	} else {
		dev_err(dev->dev,
			"URB packet %d, status %d [%s].\n",
			packet, status, errmsg);
	}
}

/*
 * Controls the isoc copy of each urb packet
 */
static inline int cx231xx_isoc_vbi_copy(struct cx231xx *dev, struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	int rc = 1;
	unsigned char *p_buffer;
	u32 bytes_parsed = 0, buffer_size = 0;
	u8 sav_eav = 0;

	if (!dev)
		return 0;

	if (dev->state & DEV_DISCONNECTED)
		return 0;

	if (urb->status < 0) {
		print_err_status(dev, -1, urb->status);
		if (urb->status == -ENOENT)
			return 0;
	}

	/* get buffer pointer and length */
	p_buffer = urb->transfer_buffer;
	buffer_size = urb->actual_length;

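	/*
	 * The VBI stream carries BT.656-style SAV/EAV codes; walk the URB
	 * payload one code at a time and hand each recovered line span to
	 * cx231xx_get_vbi_line().
	 */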
	if (buffer_size > 0) {
		bytes_parsed = 0;

		if (dma_q->is_partial_line) {
			/* Handle the case where we were working on a
			   partial line */
			sav_eav = dma_q->last_sav;
		} else {
			/* Check for a SAV/EAV overlapping the
			   buffer boundary */
			sav_eav = cx231xx_find_boundary_SAV_EAV(p_buffer,
							dma_q->partial_buf,
							&bytes_parsed);
		}

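		/*
		 * Only the top nibble (the F/V/H status bits) identifies the
		 * code type; the low nibble holds protection bits.
		 */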
		sav_eav &= 0xF0;
		/* Get the first line if we have some portion of an SAV/EAV
		   from the last buffer or a partial line */
		if (sav_eav) {
			bytes_parsed += cx231xx_get_vbi_line(dev, dma_q,
				sav_eav,		     /* SAV/EAV */
				p_buffer + bytes_parsed,     /* p_buffer */
				buffer_size - bytes_parsed); /* buffer size */
		}

		/* Now parse data that is completely in this buffer */
		dma_q->is_partial_line = 0;

		while (bytes_parsed < buffer_size) {
			u32 bytes_used = 0;

			sav_eav = cx231xx_find_next_SAV_EAV(
				p_buffer + bytes_parsed,     /* p_buffer */
				buffer_size - bytes_parsed,  /* buffer size */
				&bytes_used); /* bytes used to get SAV/EAV */

			bytes_parsed += bytes_used;

			sav_eav &= 0xF0;
			if (sav_eav && (bytes_parsed < buffer_size)) {
				bytes_parsed += cx231xx_get_vbi_line(dev,
					dma_q, sav_eav,	/* SAV/EAV */
					p_buffer + bytes_parsed, /* p_buffer */
					buffer_size - bytes_parsed); /* buffer size */
			}
		}

		/* Save the last four bytes of the buffer so we can check
		   the buffer boundary condition next time */
		memcpy(dma_q->partial_buf, p_buffer + buffer_size - 4, 4);
		bytes_parsed = 0;
	}

	return rc;
}

/* ------------------------------------------------------------------
	Vbi buf operations
   ------------------------------------------------------------------*/

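/*
 * One VBI frame holds two fields of 'height' lines, each line dev->width
 * pixels wide at two bytes per pixel, hence the width * height * 2 * 2
 * sizing below.
 */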
static int vbi_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);

	*nplanes = 1;
	sizes[0] = (dev->width * height * 2 * 2);
	return 0;
}

/* This is called *without* dev->slock held; please keep it that way */
static int vbi_buf_prepare(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	u32 height = 0;
	u32 size;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	size = ((dev->width << 1) * height * 2);

	if (vb2_plane_size(vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

static void vbi_buf_queue(struct vb2_buffer *vb)
{
	struct cx231xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct cx231xx_buffer *buf =
	    container_of(vb, struct cx231xx_buffer, vb.vb2_buf);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	list_add_tail(&buf->list, &vidq->active);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

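/*
 * Pull every buffer off the active queue and hand it back to vb2 in the
 * given state (QUEUED when start_streaming fails, ERROR on stop).
 */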
static void return_all_buffers(struct cx231xx *dev,
			       enum vb2_buffer_state state)
{
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	struct cx231xx_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.buf = NULL;
	list_for_each_entry_safe(buf, node, &vidq->active, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);
}

static int vbi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);
	struct cx231xx_dmaqueue *vidq = &dev->vbi_mode.vidq;
	int ret;

	vidq->sequence = 0;
	ret = cx231xx_init_vbi_isoc(dev, CX231XX_NUM_VBI_PACKETS,
				    CX231XX_NUM_VBI_BUFS,
				    dev->vbi_mode.alt_max_pkt_size[0],
				    cx231xx_isoc_vbi_copy);
	if (ret)
		return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void vbi_stop_streaming(struct vb2_queue *vq)
{
	struct cx231xx *dev = vb2_get_drv_priv(vq);

	return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}

struct vb2_ops cx231xx_vbi_qops = {
	.queue_setup = vbi_queue_setup,
	.buf_prepare = vbi_buf_prepare,
	.buf_queue = vbi_buf_queue,
	.start_streaming = vbi_start_streaming,
	.stop_streaming = vbi_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/* ------------------------------------------------------------------
	URB control
   ------------------------------------------------------------------*/

/*
 * Completion handler, called in interrupt context whenever a VBI URB
 * finishes
 */
static void cx231xx_irq_vbi_callback(struct urb *urb)
{
	struct cx231xx_dmaqueue *dma_q = urb->context;
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	unsigned long flags;

	switch (urb->status) {
	case 0:			/* success */
	case -ETIMEDOUT:	/* NAK */
		break;
	case -ECONNRESET:	/* kill */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:		/* error */
		dev_err(dev->dev,
			"urb completion error %d.\n", urb->status);
		break;
	}

	/* Copy data from URB */
	spin_lock_irqsave(&dev->vbi_mode.slock, flags);
	dev->vbi_mode.bulk_ctl.bulk_copy(dev, urb);
	spin_unlock_irqrestore(&dev->vbi_mode.slock, flags);

	/* Reset status */
	urb->status = 0;

	urb->status = usb_submit_urb(urb, GFP_ATOMIC);
	if (urb->status) {
		dev_err(dev->dev, "urb resubmit failed (error=%i)\n",
			urb->status);
	}
}

/*
 * Stop and Deallocate URBs
 */
void cx231xx_uninit_vbi_isoc(struct cx231xx *dev)
{
	struct urb *urb;
	int i;

	dev_dbg(dev->dev, "called cx231xx_uninit_vbi_isoc\n");

	dev->vbi_mode.bulk_ctl.nfields = -1;
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		urb = dev->vbi_mode.bulk_ctl.urb[i];
		if (urb) {
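			/*
			 * usb_kill_urb() may sleep, so fall back to the
			 * non-blocking usb_unlink_urb() when interrupts
			 * are disabled.
			 */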
			if (!irqs_disabled())
				usb_kill_urb(urb);
			else
				usb_unlink_urb(urb);

			if (dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
				kfree(dev->vbi_mode.bulk_ctl.
				      transfer_buffer[i]);
				dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
				    NULL;
			}
			usb_free_urb(urb);
			dev->vbi_mode.bulk_ctl.urb[i] = NULL;
		}
		dev->vbi_mode.bulk_ctl.transfer_buffer[i] = NULL;
	}

	kfree(dev->vbi_mode.bulk_ctl.urb);
	kfree(dev->vbi_mode.bulk_ctl.transfer_buffer);

	dev->vbi_mode.bulk_ctl.urb = NULL;
	dev->vbi_mode.bulk_ctl.transfer_buffer = NULL;
	dev->vbi_mode.bulk_ctl.num_bufs = 0;

	cx231xx_capture_start(dev, 0, Vbi);
}
EXPORT_SYMBOL_GPL(cx231xx_uninit_vbi_isoc);

/*
 * Allocate URBs and start IRQ
 */
int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
			  int num_bufs, int max_pkt_size,
			  int (*bulk_copy) (struct cx231xx *dev,
					    struct urb *urb))
{
	struct cx231xx_dmaqueue *dma_q = &dev->vbi_mode.vidq;
	int i;
	int sb_size, pipe;
	struct urb *urb;
	int rc;

	dev_dbg(dev->dev, "called cx231xx_init_vbi_isoc\n");

	/* De-allocate all pending URBs and transfer buffers */
	cx231xx_uninit_vbi_isoc(dev);

	/* clear any pending halt on the bulk-in endpoint */
	usb_clear_halt(dev->udev,
		       usb_rcvbulkpipe(dev->udev,
				       dev->vbi_mode.end_point_addr));

	dev->vbi_mode.bulk_ctl.bulk_copy = bulk_copy;
	dev->vbi_mode.bulk_ctl.num_bufs = num_bufs;
	dma_q->pos = 0;
	dma_q->is_partial_line = 0;
	dma_q->last_sav = 0;
	dma_q->current_field = -1;
	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_per_field = ((dev->norm & V4L2_STD_625_50) ?
				  PAL_VBI_LINES : NTSC_VBI_LINES);
	dma_q->lines_completed = 0;
	for (i = 0; i < 8; i++)
		dma_q->partial_buf[i] = 0;

	dev->vbi_mode.bulk_ctl.urb = kcalloc(num_bufs, sizeof(void *),
					     GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.urb) {
		dev_err(dev->dev,
			"cannot alloc memory for usb buffers\n");
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.transfer_buffer =
	    kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
	if (!dev->vbi_mode.bulk_ctl.transfer_buffer) {
		dev_err(dev->dev,
			"cannot allocate memory for usb transfer buffers\n");
		kfree(dev->vbi_mode.bulk_ctl.urb);
		return -ENOMEM;
	}

	dev->vbi_mode.bulk_ctl.max_pkt_size = max_pkt_size;
	dev->vbi_mode.bulk_ctl.buf = NULL;

	sb_size = max_packets * dev->vbi_mode.bulk_ctl.max_pkt_size;

	/* allocate urbs and transfer buffers */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}
		dev->vbi_mode.bulk_ctl.urb[i] = urb;
		urb->transfer_flags = 0;

		dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
		    kzalloc(sb_size, GFP_KERNEL);
		if (!dev->vbi_mode.bulk_ctl.transfer_buffer[i]) {
			dev_err(dev->dev,
				"unable to allocate %i bytes for transfer buffer %i%s\n",
				sb_size, i,
				in_interrupt() ? " while in int" : "");
			cx231xx_uninit_vbi_isoc(dev);
			return -ENOMEM;
		}

		pipe = usb_rcvbulkpipe(dev->udev, dev->vbi_mode.end_point_addr);
		usb_fill_bulk_urb(urb, dev->udev, pipe,
				  dev->vbi_mode.bulk_ctl.transfer_buffer[i],
				  sb_size, cx231xx_irq_vbi_callback, dma_q);
	}

	init_waitqueue_head(&dma_q->wq);

	/* submit urbs and enable IRQ */
	for (i = 0; i < dev->vbi_mode.bulk_ctl.num_bufs; i++) {
		rc = usb_submit_urb(dev->vbi_mode.bulk_ctl.urb[i], GFP_ATOMIC);
		if (rc) {
			dev_err(dev->dev,
				"submit of urb %i failed (error=%i)\n", i, rc);
			cx231xx_uninit_vbi_isoc(dev);
			return rc;
		}
	}

	cx231xx_capture_start(dev, 1, Vbi);

	return 0;
}
EXPORT_SYMBOL_GPL(cx231xx_init_vbi_isoc);

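/*
 * Map a SAV code to field 1 or field 2 and copy the line data that follows
 * it; returns the number of bytes consumed (0 for codes that do not start
 * a VBI line).
 */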
u32 cx231xx_get_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			 u8 sav_eav, u8 *p_buffer, u32 buffer_size)
{
	u32 bytes_copied = 0;
	int current_field = -1;

	switch (sav_eav) {
	case SAV_VBI_FIELD1:
		current_field = 1;
		break;
	case SAV_VBI_FIELD2:
		current_field = 2;
		break;
	default:
		break;
	}

	if (current_field < 0)
		return bytes_copied;

	dma_q->last_sav = sav_eav;

	bytes_copied =
	    cx231xx_copy_vbi_line(dev, dma_q, p_buffer, buffer_size,
				  current_field);

	return bytes_copied;
}

/*
 * Announce that a buffer was filled and request the next one
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that buffer was filled */
	/* dev_dbg(dev->dev, "[%p/%d] wakeup\n", buf, buf->vb.index); */

	buf->vb.sequence = dma_q->sequence++;
	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->list);
	vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}

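/*
 * Copy up to one line of VBI data into the current vb2 buffer, tracking
 * partial lines that straddle URB boundaries; returns the number of bytes
 * consumed from p_line.
 */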
u32 cx231xx_copy_vbi_line(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			  u8 *p_line, u32 length, int field_number)
{
	u32 bytes_to_copy;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width * 2;

	if (dma_q->current_field == -1) {
		/* Just starting up */
		cx231xx_reset_vbi_buffer(dev, dma_q);
	}

	if (dma_q->current_field != field_number)
		dma_q->lines_completed = 0;

	/* get the buffer pointer */
	buf = dev->vbi_mode.bulk_ctl.buf;

	/* Remember the field number for next time */
	dma_q->current_field = field_number;

	bytes_to_copy = dma_q->bytes_left_in_line;
	if (bytes_to_copy > length)
		bytes_to_copy = length;

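	/* Don't copy anything past the expected line count for this field */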
	if (dma_q->lines_completed >= dma_q->lines_per_field) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return 0;
	}

	dma_q->is_partial_line = 1;

	/* If we don't have a buffer, just return the number of bytes we
	   would have copied if we had a buffer. */
	if (!buf) {
		dma_q->bytes_left_in_line -= bytes_to_copy;
		dma_q->is_partial_line =
		    (dma_q->bytes_left_in_line == 0) ? 0 : 1;
		return bytes_to_copy;
	}

	/* copy the data to video buffer */
	cx231xx_do_vbi_copy(dev, dma_q, p_line, bytes_to_copy);

	dma_q->pos += bytes_to_copy;
	dma_q->bytes_left_in_line -= bytes_to_copy;

	if (dma_q->bytes_left_in_line == 0) {
		dma_q->bytes_left_in_line = _line_size;
		dma_q->lines_completed++;
		dma_q->is_partial_line = 0;

		if (cx231xx_is_vbi_buffer_done(dev, dma_q) && buf) {
			vbi_buffer_filled(dev, dma_q, buf);

			dma_q->pos = 0;
			dma_q->lines_completed = 0;
			cx231xx_reset_vbi_buffer(dev, dma_q);
		}
	}

	return bytes_to_copy;
}

/*
 * Generic routine to get the next available buffer from the vb2 queue
 */
static inline void get_next_vbi_buf(struct cx231xx_dmaqueue *dma_q,
				    struct cx231xx_buffer **buf)
{
	struct cx231xx_video_mode *vmode =
	    container_of(dma_q, struct cx231xx_video_mode, vidq);
	struct cx231xx *dev = container_of(vmode, struct cx231xx, vbi_mode);
	char *outp;

	if (list_empty(&dma_q->active)) {
		dev_err(dev->dev, "No active queue to serve\n");
		dev->vbi_mode.bulk_ctl.buf = NULL;
		*buf = NULL;
		return;
	}

	/* Get the next buffer */
	*buf = list_entry(dma_q->active.next, struct cx231xx_buffer, list);

	/* Clean up the buffer - useful for detecting frame/URB loss */
	outp = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
	memset(outp, 0, vb2_plane_size(&(*buf)->vb.vb2_buf, 0));

	dev->vbi_mode.bulk_ctl.buf = *buf;
}

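/*
 * Reset per-line bookkeeping and, if no buffer is currently in flight,
 * try to pick up the next one from the active queue.
 */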
void cx231xx_reset_vbi_buffer(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	struct cx231xx_buffer *buf;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL) {
		/* first try to get the buffer */
		get_next_vbi_buf(dma_q, &buf);

		dma_q->pos = 0;
		dma_q->current_field = -1;
	}

	dma_q->bytes_left_in_line = dev->width << 1;
	dma_q->lines_completed = 0;
}

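/*
 * Copy one chunk of line data into the vb2 buffer at the offset computed
 * from the number of completed lines and the current field.
 */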
int cx231xx_do_vbi_copy(struct cx231xx *dev, struct cx231xx_dmaqueue *dma_q,
			u8 *p_buffer, u32 bytes_to_copy)
{
	u8 *p_out_buffer = NULL;
	u32 current_line_bytes_copied = 0;
	struct cx231xx_buffer *buf;
	u32 _line_size = dev->width << 1;
	void *startwrite;
	int offset, lencopy;

	buf = dev->vbi_mode.bulk_ctl.buf;

	if (buf == NULL)
		return -EINVAL;

	p_out_buffer = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);

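	/*
	 * If we are resuming a partially copied line, account for the bytes
	 * already written so the destination offset lands mid-line.
	 */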
	if (dma_q->bytes_left_in_line != _line_size) {
		current_line_bytes_copied =
		    _line_size - dma_q->bytes_left_in_line;
	}

	offset = (dma_q->lines_completed * _line_size) +
		 current_line_bytes_copied;

	if (dma_q->current_field == 2) {
		/* Populate the second half of the frame */
		offset += (dev->width * 2 * dma_q->lines_per_field);
	}

	/* prepare destination address */
	startwrite = p_out_buffer + offset;

	lencopy = dma_q->bytes_left_in_line > bytes_to_copy ?
		  bytes_to_copy : dma_q->bytes_left_in_line;

	memcpy(startwrite, p_buffer, lencopy);

	return 0;
}

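/*
 * The frame is considered complete once the second field has delivered a
 * full set of lines.
 */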
u8 cx231xx_is_vbi_buffer_done(struct cx231xx *dev,
			      struct cx231xx_dmaqueue *dma_q)
{
	u32 height = 0;

	height = ((dev->norm & V4L2_STD_625_50) ?
		  PAL_VBI_LINES : NTSC_VBI_LINES);
	if (dma_q->lines_completed == height && dma_q->current_field == 2)
		return 1;
	else
		return 0;
}