Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 next;			/* The next desc state in a list. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra_packed {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra_packed *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
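
/*
 * Worked example of the tradeoff above: a request with four
 * scatterlist elements placed directly consumes four ring slots, while
 * going indirect consumes a single slot plus a four-entry table
 * allocated by alloc_indirect_split() below, at the cost of one extra
 * allocation and one extra DMA mapping per request.
 */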

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
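
/*
 * Example (an illustrative sketch, not part of the original source): a
 * scatterlist-building driver can clamp its segment size to what the
 * DMA layer can actually map.  blk_queue_max_segment_size() is the
 * block layer's limit helper; the function below is hypothetical and
 * kept compiled out.
 */
#if 0
static void example_clamp_seg_size(struct virtio_device *vdev,
				   struct request_queue *q)
{
	/* Never ask the device for a segment the DMA API can't map. */
	blk_queue_max_segment_size(q,
			min_t(size_t, virtio_max_dma_size(vdev), UINT_MAX));
}
#endif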

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
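
/*
 * Example (an illustrative sketch, not part of the original source):
 * how a driver reaches virtqueue_add_split() through the public API.
 * The one-out-header/one-in-status layout mirrors common virtio
 * devices; the function and request layout here are hypothetical and
 * kept compiled out.
 */
#if 0
static int example_submit(struct virtqueue *vq, void *hdr, size_t hdr_len,
			  u8 *status)
{
	struct scatterlist hdr_sg, status_sg, *sgs[2];
	int err;

	sg_init_one(&hdr_sg, hdr, hdr_len);	/* driver -> device */
	sg_init_one(&status_sg, status, 1);	/* device -> driver */
	sgs[0] = &hdr_sg;
	sgs[1] = &status_sg;

	/* One out sg list, one in sg list; hdr doubles as the token. */
	err = virtqueue_add_sgs(vq, sgs, 1, 1, hdr, GFP_ATOMIC);
	if (err)
		return err;

	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
	return 0;
}
#endif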

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
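
/*
 * vring_need_event() (include/uapi/linux/virtio_ring.h) reports whether
 * the device's event index was crossed since the last kick:
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * Worked example: with old = 10 and new = 14, avail_event = 12 gives
 * (u16)1 < (u16)4, so a kick is needed; avail_event = 20 gives
 * (u16)65529 < (u16)4, so the kick is skipped.
 */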

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
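
/*
 * Example (an illustrative sketch, not part of the original source):
 * the usual drain loop a driver runs on top of virtqueue_get_buf().
 * Callbacks are suppressed while draining, then re-enabled with a
 * re-check to close the race against a buffer completing in between.
 * example_complete() is a hypothetical per-driver handler; the sketch
 * is kept compiled out.
 */
#if 0
static void example_vq_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		virtqueue_disable_cb(vq);
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			example_complete(token, len);
	} while (!virtqueue_enable_cb(vq));
}
#endif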
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) static void virtqueue_disable_cb_split(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		if (!vq->event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			vq->split.vring.avail->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 				cpu_to_virtio16(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 						vq->split.avail_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	u16 last_used_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	/* We optimistically turn back on interrupts, then check if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	 * more to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	 * either clear the flags bit or point the event index at the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * entry. Always do both to keep code simple. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		if (!vq->event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			vq->split.vring.avail->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 				cpu_to_virtio16(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 						vq->split.avail_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			last_used_idx = vq->last_used_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	return last_used_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			vq->split.vring.used->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
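
/*
 * A minimal sketch of how a driver pairs the two helpers above via the
 * public virtqueue_enable_cb_prepare()/virtqueue_poll() wrappers; the
 * surrounding poll loop is hypothetical:
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto poll_again;
 *	}
 *
 * A true return from virtqueue_poll() means the device added more used
 * buffers between the prepare and the check, so callbacks are
 * suppressed again and the driver keeps polling instead of sleeping.
 */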
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	u16 bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	/* We optimistically turn back on interrupts, then check if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	 * more to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * either clear the flags bit or point the event index at the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * entry. Always update the event index to keep code simple. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		if (!vq->event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			vq->split.vring.avail->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				cpu_to_virtio16(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 						vq->split.avail_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	/* TODO: tune this threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	virtio_store_mb(vq->weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			&vring_used_event(&vq->split.vring),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 					- vq->last_used_idx) > bufs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) }
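
/*
 * Worked example of the 3/4 heuristic above (numbers illustrative):
 * with avail_idx_shadow = 40 and last_used_idx = 32 there are 8
 * buffers in flight, so bufs = 8 * 3 / 4 = 6 and used_event is set to
 * 32 + 6 = 38.  The device then defers its used-buffer notification
 * until it has consumed entry 38, i.e. until roughly three quarters of
 * the outstanding buffers have completed, batching interrupts instead
 * of raising one per buffer.
 */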
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	for (i = 0; i < vq->split.vring.num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		if (!vq->split.desc_state[i].data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		/* detach_buf_split clears data, so grab it now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		buf = vq->split.desc_state[i].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		detach_buf_split(vq, i, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		vq->split.avail_idx_shadow--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 				vq->split.avail_idx_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	/* That should have freed everything. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	BUG_ON(vq->vq.num_free != vq->split.vring.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) }
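
/*
 * The public wrapper around the helper above,
 * virtqueue_detach_unused_buf(), is typically called on a driver's
 * teardown path after the device has been reset and can no longer
 * touch the ring; each call hands back one still-queued token until it
 * returns NULL.
 */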
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static struct virtqueue *vring_create_virtqueue_split(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	unsigned int vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	bool may_reduce_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	bool context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	bool (*notify)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	void (*callback)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	void *queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	size_t queue_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	struct vring vring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	/* We assume num is a power of 2. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (num & (num - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	/* TODO: allocate each queue chunk individually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 					  &dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		if (queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		if (!may_reduce_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (!num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (!queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		/* Try to get a single page. You are my only hope! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (!queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	queue_size_in_bytes = vring_size(num, vring_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	vring_init(&vring, num, queue, vring_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				   notify, callback, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (!vq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		vring_free_queue(vdev, queue_size_in_bytes, queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				 dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	to_vvq(vq)->split.queue_dma_addr = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	to_vvq(vq)->we_own_ring = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
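
/*
 * Size arithmetic behind the allocations above, for reference: a split
 * ring needs ALIGN(16 * num + 2 * (3 + num), vring_align) bytes for
 * the descriptor table plus avail ring, followed by 6 + 8 * num bytes
 * for the used ring.  E.g. num = 256 with vring_align = 4096 gives
 * 4096 + 518 = 4614 -> 8192 aligned, plus 2054, so vring_size()
 * returns 10246.  The loop above first tries the full contiguous
 * allocation and, when that fails and may_reduce_num allows, halves
 * num until the ring fits in a page, at which point the single-page
 * fallback runs.
 */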
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * Packed ring specific functions - *_packed().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
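/*
 * Orientation for what follows: unlike the split ring above, the
 * packed ring (VIRTIO 1.1) uses a single descriptor array that both
 * sides write in place.  Ownership is signalled through the AVAIL and
 * USED flag bits of each descriptor, interpreted against per-side wrap
 * counters that toggle whenever an index walks off the end of the
 * ring, and the device completes a whole chain as one used entry
 * identified by its buffer id.
 */
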
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				     struct vring_desc_extra_packed *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (!vq->use_dma_api)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	flags = state->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	if (flags & VRING_DESC_F_INDIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		dma_unmap_single(vring_dma_dev(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				 state->addr, state->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				 (flags & VRING_DESC_F_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		dma_unmap_page(vring_dma_dev(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			       state->addr, state->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			       (flags & VRING_DESC_F_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				    struct vring_packed_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (!vq->use_dma_api)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	flags = le16_to_cpu(desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (flags & VRING_DESC_F_INDIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		dma_unmap_single(vring_dma_dev(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				 le64_to_cpu(desc->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				 le32_to_cpu(desc->len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				 (flags & VRING_DESC_F_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		dma_unmap_page(vring_dma_dev(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			       le64_to_cpu(desc->addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			       le32_to_cpu(desc->len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			       (flags & VRING_DESC_F_WRITE) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 						       gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	struct vring_packed_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * We require lowmem mappings for the descriptors because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * otherwise virt_to_phys will give us bogus addresses in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * virtqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	gfp &= ~__GFP_HIGHMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				       struct scatterlist *sgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 				       unsigned int total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				       unsigned int out_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				       unsigned int in_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				       void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				       gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct vring_packed_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	unsigned int i, n, err_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	u16 head, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	head = vq->packed.next_avail_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	desc = alloc_indirect_packed(total_sg, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (unlikely(vq->vq.num_free < 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		pr_debug("Can't add buf len 1 - avail = 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	id = vq->free_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	BUG_ON(id == vq->packed.vring.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	for (n = 0; n < out_sgs + in_sgs; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			if (vring_mapping_error(vq, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 				goto unmap_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			desc[i].flags = cpu_to_le16(n < out_sgs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 						0 : VRING_DESC_F_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			desc[i].addr = cpu_to_le64(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			desc[i].len = cpu_to_le32(sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	/* Now that the indirect table is filled in, map it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	addr = vring_map_single(vq, desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			total_sg * sizeof(struct vring_packed_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (vring_mapping_error(vq, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		goto unmap_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 				sizeof(struct vring_packed_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (vq->use_dma_api) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		vq->packed.desc_extra[id].addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		vq->packed.desc_extra[id].len = total_sg *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 				sizeof(struct vring_packed_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 						  vq->packed.avail_used_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 * A driver MUST NOT make the first descriptor in the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * available before all subsequent descriptors comprising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 * the list are made available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	virtio_wmb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 						vq->packed.avail_used_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	/* We're using some buffers from the free list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	vq->vq.num_free -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* Update free pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	n = head + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (n >= vq->packed.vring.num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		n = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		vq->packed.avail_wrap_counter ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		vq->packed.avail_used_flags ^=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 				1 << VRING_PACKED_DESC_F_AVAIL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 				1 << VRING_PACKED_DESC_F_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	vq->packed.next_avail_idx = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	vq->free_head = vq->packed.desc_state[id].next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* Store token and indirect buffer state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	vq->packed.desc_state[id].num = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	vq->packed.desc_state[id].data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	vq->packed.desc_state[id].indir_desc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	vq->packed.desc_state[id].last = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	vq->num_added += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	pr_debug("Added buffer head %i to %p\n", head, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) unmap_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	err_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	for (i = 0; i < err_idx; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		vring_unmap_desc_packed(vq, &desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
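
/*
 * Shape of what virtqueue_add_indirect_packed() builds, as a sketch:
 *
 *	main ring, slot `head`:
 *		addr  -> DMA address of the side table below
 *		len   =  total_sg * sizeof(struct vring_packed_desc)
 *		flags =  VRING_DESC_F_INDIRECT | avail/used bits
 *
 *	side table (kmalloc'ed, then DMA-mapped as one blob):
 *		entries from the out_sgs lists: device-readable
 *		entries from the in_sgs lists:  VRING_DESC_F_WRITE
 *
 * Only one main-ring slot is consumed, which is why num_free is
 * decremented by 1 rather than by total_sg.
 */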
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static inline int virtqueue_add_packed(struct virtqueue *_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				       struct scatterlist *sgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				       unsigned int total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				       unsigned int out_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				       unsigned int in_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				       void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				       void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 				       gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	struct vring_packed_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	unsigned int i, n, c, descs_used, err_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	__le16 head_flags, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	u16 head, id, prev, curr, avail_used_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	BUG_ON(data == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	BUG_ON(ctx && vq->indirect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (unlikely(vq->broken)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	LAST_ADD_TIME_UPDATE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	BUG_ON(total_sg == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	if (virtqueue_use_indirect(_vq, total_sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 						    in_sgs, data, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (err != -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		/* fall back on direct */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	head = vq->packed.next_avail_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	avail_used_flags = vq->packed.avail_used_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	desc = vq->packed.vring.desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	i = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	descs_used = total_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	if (unlikely(vq->vq.num_free < descs_used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		pr_debug("Can't add buf len %i - avail = %i\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			 descs_used, vq->vq.num_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	id = vq->free_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	BUG_ON(id == vq->packed.vring.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	curr = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	c = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	for (n = 0; n < out_sgs + in_sgs; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			if (vring_mapping_error(vq, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 				goto unmap_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			flags = cpu_to_le16(vq->packed.avail_used_flags |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			if (i == head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				head_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				desc[i].flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			desc[i].addr = cpu_to_le64(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			desc[i].len = cpu_to_le32(sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			desc[i].id = cpu_to_le16(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			if (unlikely(vq->use_dma_api)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				vq->packed.desc_extra[curr].addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				vq->packed.desc_extra[curr].len = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				vq->packed.desc_extra[curr].flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 					le16_to_cpu(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			prev = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			curr = vq->packed.desc_state[curr].next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			if (unlikely(++i >= vq->packed.vring.num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 				vq->packed.avail_used_flags ^=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 					1 << VRING_PACKED_DESC_F_AVAIL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 					1 << VRING_PACKED_DESC_F_USED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (i < head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		vq->packed.avail_wrap_counter ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	/* We're using some buffers from the free list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	vq->vq.num_free -= descs_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/* Update free pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	vq->packed.next_avail_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	vq->free_head = curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	/* Store token. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	vq->packed.desc_state[id].num = descs_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	vq->packed.desc_state[id].data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	vq->packed.desc_state[id].indir_desc = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	vq->packed.desc_state[id].last = prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	 * A driver MUST NOT make the first descriptor in the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 * available before all subsequent descriptors comprising
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 * the list are made available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	virtio_wmb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	vq->packed.vring.desc[head].flags = head_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	vq->num_added += descs_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	pr_debug("Added buffer head %i to %p\n", head, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) unmap_release:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	err_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	i = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	vq->packed.avail_used_flags = avail_used_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	for (n = 0; n < total_sg; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		if (i == err_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		vring_unmap_desc_packed(vq, &desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		if (i >= vq->packed.vring.num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
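
/*
 * Worked example of the avail/used flag bookkeeping above: a freshly
 * created ring has avail_wrap_counter = 1, so avail_used_flags is
 * 1 << VRING_PACKED_DESC_F_AVAIL = 0x0080 (AVAIL != USED means the
 * descriptor is available to the device).  When next_avail_idx wraps
 * past vring.num, the XOR with 0x8080 flips both bits, giving 0x8000,
 * and descriptors written on the second lap are again seen as
 * available because the device's expectation flipped along with its
 * own wrap counter.
 */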
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	bool needs_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			__le16 off_wrap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			__le16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		u32 u32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	} snapshot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	 * We need to expose the new flags value before checking notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	 * suppressions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	virtio_mb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	old = vq->packed.next_avail_idx - vq->num_added;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	new = vq->packed.next_avail_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	vq->num_added = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	flags = le16_to_cpu(snapshot.flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	LAST_ADD_TIME_CHECK(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	LAST_ADD_TIME_INVALID(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	off_wrap = le16_to_cpu(snapshot.off_wrap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	if (wrap_counter != vq->packed.avail_wrap_counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		event_idx -= vq->packed.vring.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	needs_kick = vring_need_event(event_idx, new, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	return needs_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
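
/*
 * For reference, the vring_need_event() check used above is defined in
 * include/uapi/linux/virtio_ring.h as
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * i.e. "did the buffers made available since the last kick cross the
 * device's requested wakeup point?".  Example: old = 10, new = 14,
 * event_idx = 11 gives 2 < 4, so a kick is needed; with
 * event_idx = 20 the left side wraps to 65529 in u16 arithmetic and no
 * notification is sent.
 */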
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static void detach_buf_packed(struct vring_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			      unsigned int id, void **ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	struct vring_desc_state_packed *state = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	struct vring_packed_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	unsigned int i, curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	state = &vq->packed.desc_state[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	/* Clear data ptr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	state->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	vq->packed.desc_state[state->last].next = vq->free_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	vq->free_head = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	vq->vq.num_free += state->num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	if (unlikely(vq->use_dma_api)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		curr = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		for (i = 0; i < state->num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 			vring_unmap_state_packed(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				&vq->packed.desc_extra[curr]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			curr = vq->packed.desc_state[curr].next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	if (vq->indirect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		/* Free the indirect table, if any, now that it's unmapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		desc = state->indir_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		if (vq->use_dma_api) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			len = vq->packed.desc_extra[id].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 					i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 				vring_unmap_desc_packed(vq, &desc[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		state->indir_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	} else if (ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		*ctx = state->indir_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 				       u16 idx, bool used_wrap_counter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	bool avail, used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	u16 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	return avail == used && used == used_wrap_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
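
/*
 * In words: the driver publishes a descriptor with the AVAIL bit set
 * to its avail wrap counter and the USED bit set to the inverse, so
 * the two bits differ while the device owns the buffer.  The device
 * completes it by writing both bits equal to its own wrap counter,
 * hence "used" == (avail == used == used_wrap_counter).  Equal bits
 * matching the previous wrap value are stale entries from the last
 * lap, and differing bits mean the buffer is still in flight.
 */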
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static inline bool more_used_packed(const struct vring_virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return is_used_desc_packed(vq, vq->last_used_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 			vq->packed.used_wrap_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 					  unsigned int *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 					  void **ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	u16 last_used, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (unlikely(vq->broken)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	if (!more_used_packed(vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		pr_debug("No more buffers in queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	/* Only get used elements after they have been exposed by host. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	virtio_rmb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	last_used = vq->last_used_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	if (unlikely(id >= vq->packed.vring.num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		BAD_RING(vq, "id %u out of range\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (unlikely(!vq->packed.desc_state[id].data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		BAD_RING(vq, "id %u is not a head!\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	/* detach_buf_packed clears data, so grab it now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	ret = vq->packed.desc_state[id].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	detach_buf_packed(vq, id, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	vq->last_used_idx += vq->packed.desc_state[id].num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		vq->last_used_idx -= vq->packed.vring.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		vq->packed.used_wrap_counter ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	 * If we expect an interrupt for the next entry, tell the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	 * by writing the event index, and flush out that write before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	 * the read in the next get_buf call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		virtio_store_mb(vq->weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				&vq->packed.vring.driver->off_wrap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				cpu_to_le16(vq->last_used_idx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 					(vq->packed.used_wrap_counter <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 					 VRING_PACKED_EVENT_F_WRAP_CTR)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	LAST_ADD_TIME_INVALID(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		vq->packed.vring.driver->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 			cpu_to_le16(vq->packed.event_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	 * We optimistically turn back on interrupts, then check if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	 * more to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	if (vq->event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		vq->packed.vring.driver->off_wrap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			cpu_to_le16(vq->last_used_idx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 				(vq->packed.used_wrap_counter <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 				 VRING_PACKED_EVENT_F_WRAP_CTR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		 * We need to update the event offset and event wrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		 * counter before updating the event flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		virtio_wmb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		vq->packed.event_flags_shadow = vq->event ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 				VRING_PACKED_EVENT_FLAG_DESC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 				VRING_PACKED_EVENT_FLAG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		vq->packed.vring.driver->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 				cpu_to_le16(vq->packed.event_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 			VRING_PACKED_EVENT_F_WRAP_CTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	bool wrap_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	u16 used_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	return is_used_desc_packed(vq, used_idx, wrap_counter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
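
/*
 * The opaque value decoded above packs the wrap counter into bit 15
 * (VRING_PACKED_EVENT_F_WRAP_CTR == 15) above a 15-bit ring index:
 * e.g. off_wrap == 0x8005 means "index 5, wrap counter 1", while
 * 0x0005 is the same index on the opposite lap.
 */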
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	u16 used_idx, wrap_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	u16 bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	 * We optimistically turn back on interrupts, then check if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	 * more to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (vq->event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		/* TODO: tune this threshold */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		wrap_counter = vq->packed.used_wrap_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		used_idx = vq->last_used_idx + bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		if (used_idx >= vq->packed.vring.num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			used_idx -= vq->packed.vring.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 			wrap_counter ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		 * We need to update the event offset and event wrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		 * counter before updating the event flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		virtio_wmb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		vq->packed.event_flags_shadow = vq->event ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 				VRING_PACKED_EVENT_FLAG_DESC :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 				VRING_PACKED_EVENT_FLAG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		vq->packed.vring.driver->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 				cpu_to_le16(vq->packed.event_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	 * We need to update the event suppression structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	 * before re-checking for more used buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	virtio_mb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	if (is_used_desc_packed(vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 				vq->last_used_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 				vq->packed.used_wrap_counter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	START_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	for (i = 0; i < vq->packed.vring.num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		if (!vq->packed.desc_state[i].data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		/* detach_buf clears data, so grab it now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		buf = vq->packed.desc_state[i].data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		detach_buf_packed(vq, i, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	/* That should have freed everything. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	END_USE(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) static struct virtqueue *vring_create_virtqueue_packed(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	unsigned int vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	bool may_reduce_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	bool context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	bool (*notify)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	void (*callback)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	struct vring_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	struct vring_packed_desc *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	struct vring_packed_desc_event *driver, *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	size_t ring_size_in_bytes, event_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 				 &ring_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	if (!ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		goto err_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 				   &driver_event_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (!driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		goto err_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	device = vring_alloc_queue(vdev, event_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 				   &device_event_dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		goto err_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	if (!vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		goto err_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	vq->vq.callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	vq->vq.vdev = vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	vq->vq.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	vq->vq.num_free = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	vq->vq.index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	vq->we_own_ring = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	vq->notify = notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	vq->weak_barriers = weak_barriers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	vq->broken = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	vq->last_used_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	vq->num_added = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	vq->packed_ring = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	vq->use_dma_api = vring_use_dma_api(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	vq->in_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	vq->last_add_time_valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		!context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		vq->weak_barriers = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	vq->packed.ring_dma_addr = ring_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	vq->packed.device_event_dma_addr = device_event_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	vq->packed.event_size_in_bytes = event_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	vq->packed.vring.num = num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	vq->packed.vring.desc = ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	vq->packed.vring.driver = driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	vq->packed.vring.device = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	vq->packed.next_avail_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	vq->packed.avail_wrap_counter = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	vq->packed.used_wrap_counter = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	vq->packed.event_flags_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	vq->packed.desc_state = kmalloc_array(num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			sizeof(struct vring_desc_state_packed),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	if (!vq->packed.desc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		goto err_desc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	memset(vq->packed.desc_state, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		num * sizeof(struct vring_desc_state_packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	/* Put everything in free lists. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	vq->free_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	for (i = 0; i < num-1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		vq->packed.desc_state[i].next = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	vq->packed.desc_extra = kmalloc_array(num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			sizeof(struct vring_desc_extra_packed),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (!vq->packed.desc_extra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		goto err_desc_extra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	memset(vq->packed.desc_extra, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		num * sizeof(struct vring_desc_extra_packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	/* No callback?  Tell other side not to bother us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		vq->packed.vring.driver->flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			cpu_to_le16(vq->packed.event_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	list_add_tail(&vq->vq.list, &vdev->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	return &vq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) err_desc_extra:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	kfree(vq->packed.desc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) err_desc_state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	kfree(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) err_vq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) err_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) err_driver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) err_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  * Generic functions and exported symbols.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static inline int virtqueue_add(struct virtqueue *_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				struct scatterlist *sgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				unsigned int total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				unsigned int out_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				unsigned int in_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 				void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 					out_sgs, in_sgs, data, ctx, gfp) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 				 virtqueue_add_split(_vq, sgs, total_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 					out_sgs, in_sgs, data, ctx, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)  * virtqueue_add_sgs - expose buffers to other end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)  * @sgs: array of terminated scatterlists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)  * @out_sgs: the number of scatterlists readable by other side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725)  * @in_sgs: the number of scatterlists which are writable (after readable ones)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)  * @data: the token identifying the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)  * @gfp: how to do memory allocations (if necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  * Caller must ensure we don't call this with other virtqueue operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  * at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) int virtqueue_add_sgs(struct virtqueue *_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		      struct scatterlist *sgs[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		      unsigned int out_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		      unsigned int in_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		      void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		      gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	unsigned int i, total_sg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	/* Count them first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	for (i = 0; i < out_sgs + in_sgs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		for (sg = sgs[i]; sg; sg = sg_next(sg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			total_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			     data, NULL, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
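
/*
 * A sketch of a typical caller, in the style of a block driver; "req"
 * and its fields are hypothetical and not defined in this file:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[1] = &status;
 *
 * The header is device-readable and the status device-writable, so
 * out_sgs = 1 and in_sgs = 1, with "req" itself as the token:
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (err)
 *		back off and retry later (e.g. -ENOSPC means ring full).
 */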
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  * virtqueue_add_outbuf - expose output buffers to other end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)  * @vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)  * @sg: scatterlist (must be well-formed and terminated!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  * @num: the number of entries in @sg readable by other side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  * @data: the token identifying the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  * @gfp: how to do memory allocations (if necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  * Caller must ensure we don't call this with other virtqueue operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  * at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)  * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) int virtqueue_add_outbuf(struct virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			 struct scatterlist *sg, unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			 void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			 gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * virtqueue_add_inbuf - expose input buffers to other end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * @vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * @sg: scatterlist (must be well-formed and terminated!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  * @num: the number of entries in @sg writable by other side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  * @data: the token identifying the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * @gfp: how to do memory allocations (if necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * Caller must ensure we don't call this with other virtqueue operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  * at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) int virtqueue_add_inbuf(struct virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			struct scatterlist *sg, unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * virtqueue_add_inbuf_ctx - expose input buffers to other end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * @vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  * @sg: scatterlist (must be well-formed and terminated!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  * @num: the number of entries in @sg writable by other side
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  * @data: the token identifying the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  * @ctx: extra context for the token
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * @gfp: how to do memory allocations (if necessary).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  * Caller must ensure we don't call this with other virtqueue operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  * at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			struct scatterlist *sg, unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 			void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			void *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
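
/*
 * The three wrappers above are single-scatterlist conveniences over
 * virtqueue_add_sgs().  A sketch of the common pairing ("tx_buf" and
 * "rx_buf" are hypothetical driver allocations, error handling elided):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, tx_buf, tx_len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, tx_buf, GFP_KERNEL);
 *
 *	sg_init_one(&sg, rx_buf, rx_len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, rx_buf, GFP_KERNEL);
 *
 * The tokens (here the buffer pointers themselves) reappear from
 * virtqueue_get_buf() once the device has consumed or filled the
 * buffers.
 */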
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * @_vq: the struct virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * Instead of virtqueue_kick(), you can do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *	if (virtqueue_kick_prepare(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)  *		virtqueue_notify(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  * This is sometimes useful because virtqueue_kick_prepare() needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  * to be serialized, but the actual virtqueue_notify() call does not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) bool virtqueue_kick_prepare(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 				 virtqueue_kick_prepare_split(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  * virtqueue_notify - second half of split virtqueue_kick call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * @_vq: the struct virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * This does not need to be serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * Returns false if host notify failed or queue is broken, otherwise true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) bool virtqueue_notify(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	if (unlikely(vq->broken))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	/* Prod other side to tell it about changes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (!vq->notify(_vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		vq->broken = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) EXPORT_SYMBOL_GPL(virtqueue_notify);
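
/*
 * A sketch of the split-kick pattern these two functions enable;
 * "vq_lock" is a hypothetical per-queue lock owned by the driver:
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&vq_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 *
 * Only the prepare step runs under the lock; the (potentially slow)
 * notification to the device happens outside it.
 */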
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * virtqueue_kick - update after add_buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  * @vq: the struct virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * After one or more virtqueue_add_* calls, invoke this to kick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  * the other side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  * Caller must ensure we don't call this with other virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)  * operations at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)  * Returns false if kick failed, otherwise true.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) bool virtqueue_kick(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	if (virtqueue_kick_prepare(vq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		return virtqueue_notify(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) EXPORT_SYMBOL_GPL(virtqueue_kick);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)  * virtqueue_get_buf_ctx - get the next used buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  * @len: the length written into the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)  * @ctx: extra context for the token
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  * If the device wrote data into the buffer, @len will be set to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * amount written.  This means you don't need to clear the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  * beforehand to ensure there's no data leakage in the case of short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)  * writes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)  * Caller must ensure we don't call this with other virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)  * operations at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)  * Returns NULL if there are no used buffers, or the "data" token
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)  * handed to virtqueue_add_*().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 			    void **ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	return virtqueue_get_buf_ctx(_vq, len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) EXPORT_SYMBOL_GPL(virtqueue_get_buf);
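
/*
 * A sketch of reaping completions, e.g. from the virtqueue callback;
 * "complete_request" stands in for a driver's own completion handler:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 *
 * Each token is the "data" pointer passed to the matching
 * virtqueue_add_*() call.
 */
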
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)  * virtqueue_disable_cb - disable callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)  * Note that this is not necessarily synchronous, hence unreliable and only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  * useful as an optimization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  * Unlike other operations, this need not be serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) void virtqueue_disable_cb(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (vq->packed_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		virtqueue_disable_cb_packed(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		virtqueue_disable_cb_split(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  * This re-enables callbacks; it returns the current queue state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  * in an opaque unsigned value. This value should later be tested by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * virtqueue_poll(), to detect a possible race between the driver checking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)  * more work, and enabling callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  * Caller must ensure we don't call this with other virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * operations at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 				 virtqueue_enable_cb_prepare_split(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  * virtqueue_poll - query pending used buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)  * Returns "true" if there are pending used buffers in the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)  * This does not need to be serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	if (unlikely(vq->broken))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	virtio_mb(vq->weak_barriers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 				 virtqueue_poll_split(_vq, last_used_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) EXPORT_SYMBOL_GPL(virtqueue_poll);
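
/*
 * A sketch of the race-free re-arm pattern that
 * virtqueue_enable_cb_prepare() and virtqueue_poll() support
 * (virtqueue_enable_cb() below is exactly this pair composed):
 *
 *	unsigned opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		more used buffers arrived before callbacks were
 *		re-armed: go around the processing loop again.
 *	}
 */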
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * virtqueue_enable_cb - restart callbacks after disable_cb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  * This re-enables callbacks; it returns "false" if there are pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)  * buffers in the queue, to detect a possible race between the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)  * checking for more work, and enabling callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)  * Caller must ensure we don't call this with other virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)  * operations at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) bool virtqueue_enable_cb(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	return !virtqueue_poll(_vq, last_used_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  * This re-enables callbacks but hints to the other side to delay
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  * interrupts until most of the available buffers have been processed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  * it returns "false" if there are many pending buffers in the queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  * to detect a possible race between the driver checking for more work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)  * and enabling callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)  * Caller must ensure we don't call this with other virtqueue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)  * operations at the same time (except where noted).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 				 virtqueue_enable_cb_delayed_split(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
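
/*
 * A sketch of how a transmit-cleanup path might use the delayed
 * variant; "free_old_buffers" is a hypothetical driver function:
 *
 *	free_old_buffers(vq);
 *	if (!virtqueue_enable_cb_delayed(vq)) {
 *		many completions are already pending: process them
 *		now rather than waiting for the delayed interrupt.
 *	}
 */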
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)  * virtqueue_detach_unused_buf - detach first unused buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  * @_vq: the struct virtqueue we're talking about.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  * Returns NULL or the "data" token handed to virtqueue_add_*().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  * This is not valid on an active queue; it is useful only for device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  * shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 				 virtqueue_detach_unused_buf_split(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
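
/*
 * A sketch of the intended shutdown-time use, after the device has
 * been reset so the queue is no longer active:
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */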
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) static inline bool more_used(const struct vring_virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) irqreturn_t vring_interrupt(int irq, void *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	if (!more_used(vq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	if (unlikely(vq->broken))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	if (vq->vq.callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		vq->vq.callback(&vq->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) EXPORT_SYMBOL_GPL(vring_interrupt);
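
/*
 * Transports wire this up as the interrupt handler, with the struct
 * virtqueue as the dev_id cookie.  A minimal sketch, assuming the
 * transport has already mapped "irq" (flags and naming illustrative):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 */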
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) /* Only available for split ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) struct virtqueue *__vring_new_virtqueue(unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 					struct vring vring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 					struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 					bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 					bool context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 					bool (*notify)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 					void (*callback)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 					const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	struct vring_virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (!vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	vq->packed_ring = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	vq->vq.callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	vq->vq.vdev = vdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	vq->vq.name = name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	vq->vq.num_free = vring.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	vq->vq.index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	vq->we_own_ring = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	vq->notify = notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	vq->weak_barriers = weak_barriers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	vq->broken = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	vq->last_used_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	vq->num_added = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	vq->use_dma_api = vring_use_dma_api(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	vq->in_use = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	vq->last_add_time_valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		!context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		vq->weak_barriers = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	vq->split.queue_dma_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	vq->split.queue_size_in_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	vq->split.vring = vring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	vq->split.avail_flags_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	vq->split.avail_idx_shadow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	/* No callback?  Tell other side not to bother us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	if (!callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		if (!vq->event)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 					vq->split.avail_flags_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	vq->split.desc_state = kmalloc_array(vring.num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			sizeof(struct vring_desc_state_split), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (!vq->split.desc_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		kfree(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	/* Put everything in free lists. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	vq->free_head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	for (i = 0; i < vring.num-1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	memset(vq->split.desc_state, 0, vring.num *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 			sizeof(struct vring_desc_state_split));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	list_add_tail(&vq->vq.list, &vdev->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	return &vq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) struct virtqueue *vring_create_virtqueue(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	unsigned int vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	bool may_reduce_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	bool context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	bool (*notify)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	void (*callback)(struct virtqueue *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		return vring_create_virtqueue_packed(index, num, vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				vdev, weak_barriers, may_reduce_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 				context, notify, callback, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	return vring_create_virtqueue_split(index, num, vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 			vdev, weak_barriers, may_reduce_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 			context, notify, callback, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) EXPORT_SYMBOL_GPL(vring_create_virtqueue);
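
/*
 * A sketch of a transport creating a queue; "my_notify", "my_callback"
 * and the parameter values are illustrative (virtio-pci, for example,
 * uses SMP_CACHE_BYTES alignment and may_reduce_num = true):
 *
 *	static bool my_notify(struct virtqueue *vq)
 *	{
 *		poke the device's notification register with vq->index
 *		here, returning false only if the device is unreachable;
 *		return true;
 *	}
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requests");
 */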
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* Only available for split ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) struct virtqueue *vring_new_virtqueue(unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 				      unsigned int num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 				      unsigned int vring_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 				      struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 				      bool weak_barriers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 				      bool context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 				      void *pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 				      bool (*notify)(struct virtqueue *vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 				      void (*callback)(struct virtqueue *vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 				      const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	struct vring vring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	vring_init(&vring, num, pages, vring_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 				     notify, callback, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) EXPORT_SYMBOL_GPL(vring_new_virtqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) void vring_del_virtqueue(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	if (vq->we_own_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		if (vq->packed_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 			vring_free_queue(vq->vq.vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 					 vq->packed.ring_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 					 vq->packed.vring.desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 					 vq->packed.ring_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 			vring_free_queue(vq->vq.vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 					 vq->packed.event_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 					 vq->packed.vring.driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 					 vq->packed.driver_event_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			vring_free_queue(vq->vq.vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 					 vq->packed.event_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 					 vq->packed.vring.device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 					 vq->packed.device_event_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			kfree(vq->packed.desc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 			kfree(vq->packed.desc_extra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 			vring_free_queue(vq->vq.vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 					 vq->split.queue_size_in_bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 					 vq->split.vring.desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 					 vq->split.queue_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	if (!vq->packed_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		kfree(vq->split.desc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	list_del(&_vq->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	kfree(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) EXPORT_SYMBOL_GPL(vring_del_virtqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /* Manipulates transport-specific feature bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) void vring_transport_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		switch (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		case VIRTIO_RING_F_INDIRECT_DESC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		case VIRTIO_RING_F_EVENT_IDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		case VIRTIO_F_VERSION_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		case VIRTIO_F_ACCESS_PLATFORM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		case VIRTIO_F_RING_PACKED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		case VIRTIO_F_ORDER_PLATFORM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			/* We don't understand this bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			__virtio_clear_bit(vdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) EXPORT_SYMBOL_GPL(vring_transport_features);
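
/*
 * A transport typically calls this from its .finalize_features hook so
 * that only the ring-related transport bits it understands survive; a
 * minimal sketch (the surrounding virtio_config_ops is assumed):
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */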
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)  * virtqueue_get_vring_size - return the size of the virtqueue's vring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)  * @_vq: the struct virtqueue containing the vring of interest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  * Returns the size of the vring.  This is mainly used for boasting to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)  * userspace.  Unlike other operations, this need not be serialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) bool virtqueue_is_broken(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	return READ_ONCE(vq->broken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) EXPORT_SYMBOL_GPL(virtqueue_is_broken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)  * This should prevent the device from being used, allowing drivers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)  * recover.  You may need to grab appropriate locks to flush.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) void virtio_break_device(struct virtio_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	struct virtqueue *_vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	list_for_each_entry(_vq, &dev->vqs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		WRITE_ONCE(vq->broken, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) EXPORT_SYMBOL_GPL(virtio_break_device);
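
/*
 * A sketch of the intended use on a fatal error such as surprise
 * removal: mark the queues broken first, then tear down, so that
 * concurrent virtqueue users fail fast instead of touching the ring:
 *
 *	virtio_break_device(vdev);
 *	then flush or cancel outstanding work and delete the queues.
 */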
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	BUG_ON(!vq->we_own_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 	if (vq->packed_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		return vq->packed.ring_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	return vq->split.queue_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	BUG_ON(!vq->we_own_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (vq->packed_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		return vq->packed.driver_event_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	return vq->split.queue_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	struct vring_virtqueue *vq = to_vvq(_vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	BUG_ON(!vq->we_own_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	if (vq->packed_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		return vq->packed.device_event_dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	return vq->split.queue_dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) /* Only available for split ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) const struct vring *virtqueue_get_vring(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	return &to_vvq(vq)->split.vring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) EXPORT_SYMBOL_GPL(virtqueue_get_vring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) MODULE_LICENSE("GPL");