// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
					" 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_ACCESS_PLATFORM)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag */
	struct page_frag page_frag;
	/* Refcount bias of page frag */
	int refcnt_bias;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

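/* Each RX virtqueue keeps a small local cache (vhost_net_buf) of pointers
 * consumed in bulk from the backend's ptr_ring; head and tail index into
 * that cache. The helpers below operate on the cache only.
 */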
static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

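/* Mark a virtqueue index as zerocopy-capable; the mask is consulted when
 * the per-vq ubuf_info arrays are allocated in vhost_net_set_ubuf_info().
 */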
static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

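/* Drop one reference and wake up waiters once the count reaches zero,
 * i.e. after the final put done during flush or release.
 */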
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

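/* tx_packets and tx_zcopy_err form a sliding window: both are reset every
 * 1024 sent packets, so vhost_net_tx_select_zcopy() sees a recent error
 * rate rather than an all-time one.
 */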
static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* The lower device may complete DMA out of order. upend_idx tracks the
 * end of the used idx range (outstanding buffers); done_idx tracks its
 * head. Once the lower device has completed a contiguous run of DMAs,
 * we signal the used idx to the KVM guest.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

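/* local_clock() is in nanoseconds; shifting right by 10 divides by 1024,
 * giving roughly microseconds, the unit used for busyloop_timeout.
 */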
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

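/* Flush the used heads batched in nvq->done_idx: add them to the used
 * ring in one call and signal the guest if needed.
 */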
static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

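/* Submit all batched XDP buffs to the backend in a single sendmsg()
 * (TUN_MSG_PTR), then flush the matching used heads. On a send error the
 * pages backing the batch are released one by one and the batch dropped.
 */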
static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Failed to batch send packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_has_work(&net->dev)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else if (!poll_rx) /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}

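/* Fetch the next available TX descriptor. If the avail ring is empty and
 * a busyloop timeout is configured, flush any batched packets, busy poll
 * for a while, then retry once before giving up.
 */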
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

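/* Limit the number of outstanding zerocopy TX buffers to VHOST_MAX_PEND,
 * or a quarter of the ring size if that is smaller; past this point the
 * TX path falls back to copying.
 */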
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, WRITE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
		       *out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
		       *len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define SKB_FRAG_PAGE_ORDER	get_order(32768)

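/* Refill the private page frag used for building XDP buffs. A large
 * refcount bias (USHRT_MAX) is taken in one page_ref_add() so that each
 * buff costs a plain decrement of refcnt_bias instead of an atomic ref;
 * the unused bias is drained when the page is replaced.
 */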
static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
				       struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		__page_frag_cache_drain(pfrag->page, net->refcnt_bias);
	}

	pfrag->offset = 0;
	net->refcnt_bias = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			goto done;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		goto done;
	}
	return false;

done:
	net->refcnt_bias = USHRT_MAX;
	page_ref_add(pfrag->page, USHRT_MAX - 1);
	return true;
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

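/* Copy one descriptor's payload from the guest iov into the page frag and
 * point an xdp_buff at it. Frag layout: a tun_xdp_hdr (carrying the
 * virtio-net header) at the start, packet data at offset pad, and room
 * for skb_shared_info at the tail. Returns -ENOSPC when the packet cannot
 * fit in one page, which sends the caller down the single-packet path.
 */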
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct page_frag *alloc_frag = &net->page_frag;
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	struct tun_xdp_hdr *hdr;
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!vhost_net_page_frag_refill(net, buflen,
						 alloc_frag, GFP_KERNEL)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset +
				     offsetof(struct tun_xdp_hdr, gso),
				     sock_hlen, from);
	if (copied != sock_hlen)
		return -EFAULT;

	hdr = buf;
	gso = &hdr->gso;

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
					      vhost16_to_cpu(vq, gso->csum_start) +
					      vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len)
			return -EINVAL;
	}

	len -= sock_hlen;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return -EFAULT;

	xdp->data_hard_start = buf;
	xdp->data = buf + pad;
	xdp->data_end = xdp->data + len;
	hdr->buflen = buflen;
	xdp->frame_sz = buflen;

	--net->refcnt_bias;
	alloc_frag->offset += buflen;

	++nvq->batched_xdp;

	return 0;
}

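/* TX in copy mode: drain the TX ring, either accumulating batched XDP
 * buffs (possible only when sndbuf is unlimited) or calling sendmsg() per
 * packet, and credit used heads back to the guest as we go.
 */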
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct vhost_virtqueue *vq = &nvq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) unsigned out, in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) int head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct msghdr msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) .msg_name = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) .msg_namelen = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) .msg_control = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) .msg_controllen = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) .msg_flags = MSG_DONTWAIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) size_t len, total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) int sent_pkts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) bool busyloop_intr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev,
								vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build an XDP buff, so fall back to the
			 * single packet path, but flush any batched
			 * packets first.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	vhost_tx_batch(net, nvq, sock, &msg);
}

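/* Transmit path with experimental zerocopy: large packets are handed to
 * the lower device for DMA directly from guest memory, and completion
 * is reported asynchronously through the ubuf_info callback.
 */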
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release buffers whose DMAs are done first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

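		/* Zerocopy pays off only for large packets, and only while
		 * the number of outstanding DMAs stays below the limit.
		 */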
		zcopy_used = len >= VHOST_GOODCOPY_LEN
			     && !vhost_exceeds_maxpend(net)
			     && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			refcount_set(&ubuf->refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

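/* Return the length of the first packet queued on the RX socket, or 0
 * if the queue is empty; the tap ring is consulted first when present.
 */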
static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

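/* Like peek_head_len(), but optionally busy polls the TX virtqueue and
 * the RX socket while the receive queue is still empty.
 */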
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), which works if
 * the vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 * returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: out %d, in %d\n",
			       out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

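	/* Keep receiving until the socket drains or this queue has used
	 * up its byte/packet weight.
	 */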
	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely(vhost_hlen)) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: len %d, expected %zd\n",
				 err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
				       vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else if (!sock_len)
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

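/* The kick handlers run when the guest signals the eventfd; the *_net
 * variants below run when the backend socket becomes ready instead.
 */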
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

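/* Open /dev/vhost-net: allocate the device, both virtqueues, the RX
 * batch queue and the TX XDP buffer array.
 */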
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;
	n->page_frag.page = NULL;
	n->refcnt_bias = 0;

	return 0;
}

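/* Detach the backend socket from a virtqueue and return it to the
 * caller, who is responsible for dropping the socket reference.
 */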
static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs to finish. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

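/* Release: stop both queues, flush outstanding work, then free all
 * per-device memory.
 */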
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	if (n->page_frag.page)
		__page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
	kvfree(n);
	return 0;
}

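/* Resolve an fd to a raw AF_PACKET socket, or an ERR_PTR on any
 * mismatch in socket type or family.
 */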
static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

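/* Try to obtain the ptr_ring backing a tun or tap fd; returns NULL for
 * any other kind of file.
 */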
static struct ptr_ring *get_tap_ptr_ring(int fd)
{
	struct ptr_ring *ring;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	fput(file);
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

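/* A backend fd may be a raw packet socket or a tun/tap device; fd == -1
 * is the special case that disables the backend.
 */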
static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

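/* VHOST_NET_SET_BACKEND: swap the backend socket of one virtqueue,
 * waiting for the old socket's in-flight zerocopy DMAs before dropping
 * its reference.
 */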
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_ring = get_tap_ptr_ring(fd);

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

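/* VHOST_RESET_OWNER: stop both queues and return the device to its
 * pre-ownership state with a fresh IOTLB.
 */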
static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

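/* VHOST_SET_FEATURES: the negotiated feature bits determine who
 * supplies the vnet header and how large it is.
 */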
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static int vhost_net_set_features(struct vhost_net *n, u64 features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) size_t vhost_hlen, sock_hlen, hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) (1ULL << VIRTIO_F_VERSION_1))) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) sizeof(struct virtio_net_hdr_mrg_rxbuf) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) sizeof(struct virtio_net_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (features & (1ULL << VHOST_NET_F_VIRTIO_NET_HDR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) /* vhost provides vnet_hdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) vhost_hlen = hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) sock_hlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /* socket provides vnet_hdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) vhost_hlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) sock_hlen = hdr_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) mutex_lock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if ((features & (1ULL << VHOST_F_LOG_ALL)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) !vhost_log_access_ok(&n->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (vhost_init_device_iotlb(&n->dev, true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) mutex_lock(&n->vqs[i].vq.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) n->vqs[i].vq.acked_features = features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) n->vqs[i].vhost_hlen = vhost_hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) n->vqs[i].sock_hlen = sock_hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) mutex_unlock(&n->vqs[i].vq.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) mutex_unlock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) mutex_unlock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
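
/*
 * Userspace view (illustrative sketch): feature negotiation is
 * get/mask/set. guest_features below is a hypothetical mask of the bits
 * the guest driver acked; requesting anything outside VHOST_NET_FEATURES
 * fails with EOPNOTSUPP.
 *
 *	uint64_t features;
 *
 *	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *	features &= guest_features;
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 */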
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static long vhost_net_set_owner(struct vhost_net *n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) mutex_lock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) if (vhost_dev_has_owner(&n->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) r = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
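	/*
	 * Zerocopy ubuf bookkeeping is allocated before ownership is taken
	 * so it exists before any worker can run; it is torn down again if
	 * vhost_dev_set_owner() fails.
	 */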
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) r = vhost_net_set_ubuf_info(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) r = vhost_dev_set_owner(&n->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) vhost_net_clear_ubuf_info(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) vhost_net_flush(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) mutex_unlock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct vhost_net *n = f->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) u64 __user *featurep = argp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct vhost_vring_file backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) u64 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) switch (ioctl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) case VHOST_NET_SET_BACKEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (copy_from_user(&backend, argp, sizeof(backend)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return vhost_net_set_backend(n, backend.index, backend.fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) case VHOST_GET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) features = VHOST_NET_FEATURES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (copy_to_user(featurep, &features, sizeof(features)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) case VHOST_SET_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (copy_from_user(&features, featurep, sizeof(features)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) if (features & ~VHOST_NET_FEATURES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return vhost_net_set_features(n, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) case VHOST_GET_BACKEND_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) features = VHOST_NET_BACKEND_FEATURES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (copy_to_user(featurep, &features, sizeof(features)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) case VHOST_SET_BACKEND_FEATURES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (copy_from_user(&features, featurep, sizeof(features)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (features & ~VHOST_NET_BACKEND_FEATURES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) vhost_set_backend_features(&n->dev, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) case VHOST_RESET_OWNER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return vhost_net_reset_owner(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) case VHOST_SET_OWNER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return vhost_net_set_owner(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) default:
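		/*
		 * Everything else is a generic vhost device/vring ioctl.
		 * If the device-level handler consumed it (anything but
		 * -ENOIOCTLCMD), flush so the change is visible to the
		 * workers.
		 */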
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) mutex_lock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) r = vhost_dev_ioctl(&n->dev, ioctl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (r == -ENOIOCTLCMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) r = vhost_vring_ioctl(&n->dev, ioctl, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) vhost_net_flush(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) mutex_unlock(&n->dev.mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
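
/*
 * Userspace view (illustrative sketch): the rough bring-up order a VMM
 * follows against this ioctl surface. Memory table and vring setup are
 * abbreviated; vhost_fd, tap_fd, mem and features are hypothetical and
 * error handling is elided.
 *
 *	int vhost_fd = open("/dev/vhost-net", O_RDWR);
 *
 *	ioctl(vhost_fd, VHOST_SET_OWNER);
 *	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 *	... per ring: VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL ...
 *	struct vhost_vring_file backend = { .index = 0, .fd = tap_fd };
 *	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 */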
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
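/*
 * The char-device read/write/poll ops below form the IOTLB side channel:
 * once VIRTIO_F_ACCESS_PLATFORM is negotiated, userspace reads IOTLB miss
 * messages from the vhost fd and writes translation updates and
 * invalidations back to it.
 */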
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct vhost_net *n = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct vhost_dev *dev = &n->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) int noblock = file->f_flags & O_NONBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) return vhost_chr_read_iter(dev, to, noblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) struct vhost_net *n = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) struct vhost_dev *dev = &n->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return vhost_chr_write_iter(dev, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct vhost_net *n = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct vhost_dev *dev = &n->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) return vhost_chr_poll(file, dev, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) static const struct file_operations vhost_net_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) .release = vhost_net_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) .read_iter = vhost_net_chr_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) .write_iter = vhost_net_chr_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) .poll = vhost_net_chr_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) .unlocked_ioctl = vhost_net_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) .compat_ioctl = compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) .open = vhost_net_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) .llseek = noop_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) static struct miscdevice vhost_net_misc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) .minor = VHOST_NET_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) .name = "vhost-net",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) .fops = &vhost_net_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) };
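
/*
 * A misc device with the fixed VHOST_NET_MINOR gives a stable
 * /dev/vhost-net node; the MODULE_ALIAS entries at the bottom of the
 * file let the module be autoloaded when that node is first opened.
 */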
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) static int vhost_net_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (experimental_zcopytx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return misc_register(&vhost_net_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) module_init(vhost_net_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static void vhost_net_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) misc_deregister(&vhost_net_misc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) module_exit(vhost_net_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) MODULE_VERSION("0.0.1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) MODULE_AUTHOR("Michael S. Tsirkin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) MODULE_ALIAS("devname:vhost-net");