Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Memory-to-memory device framework for Video for Linux 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Helper functions for devices that use memory buffers for both source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * and destination.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (c) 2009 Samsung Electronics Co., Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Pawel Osciak, <pawel@osciak.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Marek Szyprowski, <m.szyprowski@samsung.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #ifndef _MEDIA_V4L2_MEM2MEM_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #define _MEDIA_V4L2_MEM2MEM_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <media/videobuf2-v4l2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to end before this callback returns
 *		(and it will be the usual case). When the job finishes,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method may not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have been finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and will usually not) wait
 *		until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);	/* required */
	int (*job_ready)(void *priv);	/* optional */
	void (*job_abort)(void *priv);	/* optional */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/*
 * Forward declarations. struct v4l2_m2m_dev is deliberately opaque to
 * drivers using this header (see the "opaque pointer" wording in the
 * kernel-doc below); only the framework dereferences it.
 */
struct video_device;
struct v4l2_m2m_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue (not a pointer)
 * @rdy_queue:	List of V4L2 mem-to-mem queues
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: struct &mutex lock
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates device is in draining phase
 * @last_src_buf: indicate the last source buffer for draining
 * @next_buf_last: next capture queued buffer will be tagged as last
 * @has_stopped: indicate the device has been stopped
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signalize when a job queue finished.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer (not a pointer)
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	/*
	 * NOTE(review): @vb presumably must remain the first member so the
	 * framework can convert between the two types — confirm the
	 * container_of()/cast usage in v4l2-mem2mem.c before reordering.
	 */
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Return: the @priv of the running instance, or NULL when idle.
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 *
 * Return: the vb2_queue (capture or output) matching @type.
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 *
 * Requirements 1) and 2) match the default assumed when no @job_ready
 * callback is provided (see struct v4l2_m2m_ops).
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should
 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 *
 * Drivers using held capture buffers must call
 * v4l2_m2m_buf_done_and_job_finish() instead (see below).
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of v4l2_m2m_job_finish() to take held buffers into
 * account. It is optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see v4l2_m2m_job_finish()).
 *
 * This allows for multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where
 * multiple e.g. H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
/**
 * v4l2_m2m_buf_done() - complete a buffer with the given final state
 *
 * @buf: buffer to complete, as struct &vb2_v4l2_buffer
 * @state: final buffer state, as enum &vb2_buffer_state
 *
 * Thin wrapper that forwards the embedded vb2 buffer to vb2_buffer_done().
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  * v4l2_m2m_clear_state() - clear encoding/decoding state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	m2m_ctx->next_buf_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	m2m_ctx->is_draining = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	m2m_ctx->has_stopped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)  * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 	m2m_ctx->next_buf_last = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 	m2m_ctx->is_draining = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	m2m_ctx->has_stopped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)  * v4l2_m2m_dst_buf_is_last() - return the current encoding/decoding session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)  * draining management state of next queued capture buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)  * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)  * the end of the capture session.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Return: %true once v4l2_m2m_mark_stopped() has been called and the state
 * has not been cleared since (see v4l2_m2m_clear_state()).
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)  * v4l2_m2m_is_last_draining_src_buf() - return the output buffer draining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)  * state in the current encoding/decoding session
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)  * This will identify the last output buffer queued before a session stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)  * was required, leading to an actual encoding/decoding session stop state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)  * in the encoding/decoding process after being processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  * @vbuf: pointer to struct &v4l2_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 				  struct vb2_v4l2_buffer *vbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for current job
 * to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stop new jobs from being run, and
 * wait for current running job to finish. Balanced by v4l2_m2m_resume().
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows job to be run. Also try to run a queued job if
 * there is any.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
/*
 * The helpers below are multi-queue-aware wrappers for the corresponding
 * V4L2 ioctls: each one dispatches to the capture or output vb2 queue
 * depending on the buffer type carried in its last argument.
 */

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 
/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)  * v4l2_m2m_streamon() - turn on streaming for a video queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)  * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		      enum v4l2_buf_type type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)  * v4l2_m2m_streamoff() - turn off streaming for a video queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)  * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 		       enum v4l2_buf_type type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)  * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)  * session state when a start of streaming of a video queue is requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)  * @q: queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 					   struct vb2_queue *q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437)  * v4l2_m2m_update_stop_streaming_state() -  update the encoding/decoding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)  * session state when a stop of streaming of a video queue is requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  * @q: queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 					  struct vb2_queue *q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)  * v4l2_m2m_encoder_cmd() - execute an encoder command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)  * @ec: pointer to the encoder command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 			 struct v4l2_encoder_cmd *ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)  * v4l2_m2m_decoder_cmd() - execute a decoder command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)  * @dc: pointer to the decoder command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 			 struct v4l2_decoder_cmd *dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)  * v4l2_m2m_poll() - poll replacement, for destination buffers only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)  * @wait: pointer to struct &poll_table_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)  * Call from the driver's poll() function. Will poll both queues. If a buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)  * is available to dequeue (with dqbuf) from the source queue, this will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)  * indicate that a non-blocking write can be performed, while read will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)  * returned in case of the destination queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 			   struct poll_table_struct *wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)  * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)  * @file: pointer to struct &file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)  * @vma: pointer to struct &vm_area_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)  * Call from driver's mmap() function. Will handle mmap() for both queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)  * seamlessly for videobuffer, which will receive normal per-queue offsets and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)  * proper videobuf queue pointers. The differentiation is made outside videobuf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)  * by adding a predefined offset to buffers from one of the queues and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)  * subtracting it before passing it back to videobuf. Only drivers (and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)  * thus applications) receive modified offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		  struct vm_area_struct *vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)  * v4l2_m2m_init() - initialize per-driver m2m data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)  * @m2m_ops: pointer to struct v4l2_m2m_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)  * Usually called from driver's ``probe()`` function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)  * Return: returns an opaque pointer to the internal data to handle M2M context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) #if defined(CONFIG_MEDIA_CONTROLLER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 			struct video_device *vdev, int function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 		struct video_device *vdev, int function)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)  * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)  * @m2m_dev: opaque pointer to the internal data to handle M2M context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)  * Usually called from driver's ``remove()`` function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537)  * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)  * @m2m_dev: opaque pointer to the internal data to handle M2M context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)  * @drv_priv: driver's instance private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)  * @queue_init: a callback for queue type-specific initialization function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)  *	to be used for initializing videobuf_queues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)  * Usually called from driver's ``open()`` function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		void *drv_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 					     bool buffered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	m2m_ctx->out_q_ctx.buffered = buffered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 					     bool buffered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	m2m_ctx->cap_q_ctx.buffered = buffered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)  * v4l2_m2m_ctx_release() - release m2m context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)  * Usually called from driver's release() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)  * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)  * @vbuf: pointer to struct &vb2_v4l2_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)  * Call from videobuf_queue_ops->ops->buf_queue, videobuf_queue_ops callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 			struct vb2_v4l2_buffer *vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)  * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)  * use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 	return m2m_ctx->out_q_ctx.num_rdy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)  * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)  * ready for use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	return m2m_ctx->cap_q_ctx.num_rdy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)  * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)  * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)  * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)  * buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)  * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)  * ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)  * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)  * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)  * v4l2_m2m_last_src_buf() - return last destination buffer from the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)  * ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)  * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)  * ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)  * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)  * buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)  * @b: current buffer of type struct v4l2_m2m_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) #define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)  * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)  * @b: current buffer of type struct v4l2_m2m_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) #define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)  * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)  * buffers safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)  * @b: current buffer of type struct v4l2_m2m_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)  * @n: used as temporary storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) #define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)  * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)  * buffers safely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)  * @b: current buffer of type struct v4l2_m2m_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)  * @n: used as temporary storage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) #define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710)  * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	return &m2m_ctx->out_q_ctx.q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)  * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	return &m2m_ctx->cap_q_ctx.q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)  * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)  * return it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)  * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)  * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)  * buffers and return it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)  * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)  * ready buffers and return it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)  * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)  * buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)  * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)  * @vbuf: the buffer to be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 				struct vb2_v4l2_buffer *vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)  * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)  * of ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)  * @vbuf: the buffer to be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 						  struct vb2_v4l2_buffer *vbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)  * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)  * list of ready buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)  * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)  * @vbuf: the buffer to be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 						  struct vb2_v4l2_buffer *vbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static inline struct vb2_v4l2_buffer *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)  * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)  * the output buffer to the capture buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)  * @out_vb: the output buffer that is the source of the metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)  * @cap_vb: the capture buffer that will receive the metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)  * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)  * This helper function copies the timestamp, timecode (if the TIMECODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)  * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)  * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)  * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)  * flags are not copied. This is typically needed for encoders that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)  * set this bits explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 				struct vb2_v4l2_buffer *cap_vb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 				bool copy_frame_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /* v4l2 request helper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) void v4l2_m2m_request_queue(struct media_request *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) /* v4l2 ioctl helpers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 				struct v4l2_requestbuffers *rb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 				struct v4l2_create_buffers *create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 				struct v4l2_buffer *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 				struct v4l2_exportbuffer *eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 				struct v4l2_buffer *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 				struct v4l2_buffer *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 			       struct v4l2_buffer *buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 				enum v4l2_buf_type type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 				enum v4l2_buf_type type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 			       struct v4l2_encoder_cmd *ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 			       struct v4l2_decoder_cmd *dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 				   struct v4l2_encoder_cmd *ec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 				   struct v4l2_decoder_cmd *dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 					     struct v4l2_decoder_cmd *dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 					 struct v4l2_decoder_cmd *dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) #endif /* _MEDIA_V4L2_MEM2MEM_H */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)