/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots an skb can use. If a guest sends
 * an skb that exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

/* This module parameter indicates that data destined for xen-netfront
 * must be placed at the XDP_PACKET_HEADROOM offset required for XDP
 * processing.
 */
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in the
 * pending_tx_info array.
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

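/* While a Tx request is still in flight, the frag's offset field is
 * borrowed to hold its pending_idx; xenvif_fill_frags() later replaces
 * it with the real page offset.
 */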
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)skb_frag_off(frag);
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	skb_frag_off_set(frag, pending_idx);
}

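/* The pending ring holds MAX_PENDING_REQS entries, a power of two, so a
 * free-running index is reduced into the ring by masking.
 */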
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

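/* Wake the kernel thread servicing this queue. */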
void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
				     &queue->eoi_pending) &
		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->tx_irq, 0);
}

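/* Replenish the queue's transmit credit, allowing a burst of up to
 * max(128kB, credit_bytes) while guarding against wrap-around.
 */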
static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
	queue->rate_limited = false;
}

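/* Timer callback: the credit window has expired, so top up the credit
 * and resume request processing.
 */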
void xenvif_tx_credit_callback(struct timer_list *t)
{
	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

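/* Send an error response for @txp and for every remaining slot of the
 * bad packet, consuming requests from the ring up to @end.
 */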
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->num_queues)
		xenvif_kick_thread(&vif->queues[0]);
}

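/* Walk the chain of XEN_NETTXF_more_data requests that follows @first,
 * copying each into @txp. Returns the number of follow-on slots used,
 * or a negative errno if the frontend is misbehaving badly enough that
 * the packet (or the whole interface) must be dropped.
 */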
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * is considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* The Xen network protocol had an implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to the
		 * historical MAX_SKB_FRAGS value 18 to honor the same
		 * behavior as before. Any packet using more than 18 slots
		 * but fewer than fatal_skb_slots slots is dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

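/* Prepare a grant map operation for one Tx request and stash the
 * request in pending_tx_info so the response can be sent later.
 */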
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}

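/* Create grant map operations for every slot after the first, filling
 * the skb's frags and, on frag overflow, the frags of @nskb, which is
 * then chained onto the frag_list. Returns a pointer just past the
 * last map operation created.
 */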
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop,
							unsigned int frag_overflow,
							struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots;

	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

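/* Check the status of the grant copy (header) and grant map (frag)
 * operations for one skb, releasing or invalidating pending slots as
 * needed. Returns 0 on success or the first error encountered.
 */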
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && !first_shinfo && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);

		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}

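/* Replace the pending_idx placeholders in the skb's frags with real
 * page/offset/size descriptors and chain the zerocopy callback
 * structures together.
 */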
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous one. */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}

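/* Consume the extra info slots that follow a Tx request, copying each
 * into @extras indexed by type. Returns the remaining work_to_do, or a
 * negative errno if the frontend sent malformed extras.
 */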
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

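/* Rate limiting: return true if sending @size bytes would exceed the
 * credit currently available, arming the credit timer when the packet
 * has to wait for the next credit window.
 */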
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) u64 now = get_jiffies_64();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) u64 next_credit = queue->credit_window_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) msecs_to_jiffies(queue->credit_usec / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* Timer could already be pending in rare cases. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (timer_pending(&queue->credit_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) queue->rate_limited = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /* Passed the point where we can replenish credit? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (time_after_eq64(now, next_credit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) queue->credit_window_start = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) tx_add_credit(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* Still too big to send right now? Set a callback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) if (size > queue->remaining_credit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) mod_timer(&queue->credit_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) next_credit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) queue->credit_window_start = next_credit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) queue->rate_limited = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /* No locking is required in xenvif_mcast_add/del() as they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * only ever invoked from NAPI poll. An RCU list is used because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * xenvif_mcast_match() is called asynchronously, during start_xmit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct xenvif_mcast_addr *mcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) if (net_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) netdev_err(vif->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) "Too many multicast addresses\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) if (!mcast)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ether_addr_copy(mcast->addr, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) vif->fe_mcast_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct xenvif_mcast_addr *mcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (ether_addr_equal(addr, mcast->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) --vif->fe_mcast_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) list_del_rcu(&mcast->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) kfree_rcu(mcast, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct xenvif_mcast_addr *mcast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (ether_addr_equal(addr, mcast->addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}

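/* First half of the TX path. For each frontend request, up to
 * XEN_NETBACK_TX_COPY_LEN bytes are staged as a grant-copy into the skb's
 * linear area (tx_copy_ops) and any remaining payload as grant-map
 * operations on frags (tx_map_ops). Nothing is issued here: the batched
 * hypercalls happen later, in xenvif_tx_action(). The loop stops early once
 * either operation array is full.
 */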
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned int *copy_ops,
				 unsigned int *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		unsigned int extra_count;
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* The first request's payload must not cross a page
		 * boundary, as it cannot be fragmented.
		 */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset & ~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

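		/* Decide how much of the first request to grant-copy into
		 * the linear area: at most XEN_NETBACK_TX_COPY_LEN bytes,
		 * with any remainder grant-mapped as frag 0 further down.
		 * If the packet already uses the maximum number of slots,
		 * the whole first request is copied instead (presumably to
		 * avoid adding yet another frag for the remainder).
		 */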
		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
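		/* Packets needing more slots than MAX_SKB_FRAGS get a
		 * second skb (nskb) to carry the overflow frags; it is hung
		 * off the first skb as a frag_list and later folded back
		 * into ordinary local frags by xenvif_handle_frag_list() on
		 * the submit path.
		 */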
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;

			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop - queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate an skb with a frag_list into a brand new one with local pages
 * on its frags. Returns 0 on success, or -ENOMEM if new pages can't be
 * allocated. This path is taken when a packet needed more slots than
 * MAX_SKB_FRAGS and its overflow frags were parked on a frag_list skb.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract the frags' size; truesize is corrected again below. */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;

			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(skb_frag_page(&frags[j]));
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		__skb_frag_set_page(&frags[i], page);
		skb_frag_off_set(&frags[i], 0);
		skb_frag_size_set(&frags[i], len);
	}

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}

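/* Second half of the TX path: runs after the batched grant copy/map
 * operations have been issued. Checks the per-packet operation status,
 * fixes up checksum and GSO metadata, and hands the completed skbs to the
 * network stack via netif_receive_skb().
 */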
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned int data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;

				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
			/* Copied all the bits from the frag list -- free it. */
			skb_frag_list_init(skb);
			kfree_skb(nskb);
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss, hdrlen;

			/* GSO implies having the L4 header. */
			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
			if (unlikely(!skb_transport_header_was_set(skb))) {
				kfree_skb(skb);
				continue;
			}

			mss = skb_shinfo(skb)->gso_size;
			hdrlen = skb_transport_header(skb) -
				 skb_mac_header(skb) +
				 tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

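/* Zerocopy completion handler. The ubuf_info structures of a packet are
 * chained through ubuf->ctx, with each ->desc carrying a pending_idx; every
 * index is queued on the dealloc ring for the dealloc kthread to unmap and
 * release. Only the producer side lives here; the consumer side is
 * xenvif_tx_dealloc_action() below.
 */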
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *)ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
		       MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

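/* Consumer side of the dealloc ring: drain every pending_idx queued by
 * xenvif_zerocopy_callback(), unmap the corresponding grants in a single
 * gnttab_unmap_refs() batch, then release the slots back to the pending
 * ring.
 */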
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using. */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by every
		 * xenvif_zerocopy_callback() instance.
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}


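/* Top half of the TX path: build the grant operations, issue them as one
 * batch, then submit the finished skbs. A rough sketch of the expected call
 * pattern, assuming the NAPI poll handler in interface.c is the caller:
 *
 *	work_done = xenvif_tx_action(queue, budget);
 *	if (work_done < budget)
 *		napi_complete_done(napi, work_done);
 */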
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned int nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		if (ret) {
			unsigned int i;

			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
				   nr_mops, ret);
			for (i = 0; i < nr_mops; ++i)
				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
					     GNTST_okay);
		}
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}


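/* Build a TX response (plus XEN_NETIF_RSP_NULL slots for any extras that
 * accompanied the request) at rsp_prod_pvt. The response only becomes
 * visible to the frontend once push_tx_responses() publishes rsp_prod and,
 * if the frontend asked for it, kicks the event channel.
 */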
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
	resp->status = st;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}

static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

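/* Map the frontend's shared TX/RX rings. The producer indices are read once
 * and sanity-checked against the ring size: a difference larger than the
 * ring means the (untrusted) frontend published bogus indices, and the
 * attach is aborted with -EIO.
 */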
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	rsp_prod = READ_ONCE(txs->rsp_prod);
	req_prod = READ_ONCE(txs->req_prod);

	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
		goto err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	rsp_prod = READ_ONCE(rxs->rsp_prod);
	req_prod = READ_ONCE(rxs->req_prod);

	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
		goto err;

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}

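/* The dealloc kthread must outlive all in-flight zerocopy completions:
 * kthread_should_stop() alone is not enough, the inflight packet count must
 * also have dropped to zero before the thread may exit.
 */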
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining. */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

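/* Control ring handling: each request is copied off the shared ring,
 * dispatched to the hash configuration helpers, and answered with a
 * matching response before the next request is consumed.
 */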
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) static void make_ctrl_response(struct xenvif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) const struct xen_netif_ctrl_request *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) u32 status, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) RING_IDX idx = vif->ctrl.rsp_prod_pvt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct xen_netif_ctrl_response rsp = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) .id = req->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) .type = req->type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) .status = status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) .data = data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) vif->ctrl.rsp_prod_pvt = ++idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) static void push_ctrl_response(struct xenvif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) int notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) notify_remote_via_irq(vif->ctrl_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) static void process_ctrl_request(struct xenvif *vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) const struct xen_netif_ctrl_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) switch (req->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) status = xenvif_set_hash_alg(vif, req->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) status = xenvif_get_hash_flags(vif, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) status = xenvif_set_hash_flags(vif, req->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) status = xenvif_set_hash_key(vif, req->data[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) req->data[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) status = XEN_NETIF_CTRL_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) status = xenvif_set_hash_mapping_size(vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) req->data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) status = xenvif_set_hash_mapping(vif, req->data[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) req->data[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) req->data[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) make_ctrl_response(vif, req, status, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) push_ctrl_response(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
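/* Drain the control ring. After consuming all outstanding requests the
 * ring's req_event is set to req_cons + 1 so the frontend will raise a
 * fresh notification, and the outer loop re-reads req_prod to close the
 * race with requests posted just before req_event was updated.
 */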
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) static void xenvif_ctrl_action(struct xenvif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) RING_IDX req_prod, req_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) req_prod = vif->ctrl.sring->req_prod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) req_cons = vif->ctrl.req_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /* Make sure we can see requests before we process them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (req_cons == req_prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) while (req_cons != req_prod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct xen_netif_ctrl_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) req_cons++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) process_ctrl_request(vif, &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) vif->ctrl.req_cons = req_cons;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) vif->ctrl.sring->req_event = req_cons + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
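/* Return true if the control ring has requests we have not consumed yet. */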
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static bool xenvif_ctrl_work_todo(struct xenvif *vif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
	return !!RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
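/* Control event channel interrupt: run the ring until it is idle. The
 * spurious flag is only passed to xen_irq_lateeoi() if no work was found,
 * so the event channel core can treat the upcall as spurious and defer
 * the EOI accordingly.
 */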
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct xenvif *vif = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) while (xenvif_ctrl_work_todo(vif)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) xenvif_ctrl_action(vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) eoi_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) xen_irq_lateeoi(irq, eoi_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
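/* Module initialisation: only load in a Xen domain, derive defaults for
 * the tunables, and register the xenbus backend driver.
 */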
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) static int __init netback_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
	/* Allow as many queues as there are CPUs, but at most
	 * MAX_QUEUES_DEFAULT (8), if the user has not specified a value.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) if (xenvif_max_queues == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
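	/* A frontend may legitimately use up to XEN_NETBK_LEGACY_SLOTS_MAX
	 * slots for one packet, so never treat fewer slots as fatal.
	 */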
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) rc = xenvif_xenbus_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) goto failed_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
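	/* The debugfs hierarchy is best effort: debugfs_create_dir() reports
	 * errors via its return value, which callers are expected to ignore.
	 */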
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) #endif /* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) failed_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) module_init(netback_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
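/* Module teardown: remove the debugfs hierarchy (if any) and unregister
 * the xenbus backend driver.
 */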
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) static void __exit netback_fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) debugfs_remove_recursive(xen_netback_dbg_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #endif /* CONFIG_DEBUG_FS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) xenvif_xenbus_fini();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) module_exit(netback_fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) MODULE_LICENSE("Dual BSD/GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) MODULE_ALIAS("xen-backend:vif");