/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	unsigned int extra_count;
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
	 * also an index into the pending_tx_info array. It is initialized in
	 * xenvif_alloc and never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * callback_struct in this array of struct pending_tx_info's, then
	 * ctx points to the next, or is NULL if there are no more slots for
	 * this skb.
	 * ubuf_to_vif is a helper which finds the struct xenvif from a
	 * pointer to this field.
	 */
	struct ubuf_info callback_struct;
};
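
/*
 * Illustrative sketch (not part of the driver): given one of the
 * ubuf_info links described above, the enclosing slot can be recovered
 * with container_of(), since callback_struct is embedded in
 * struct pending_tx_info.
 */
static inline struct pending_tx_info *
example_slot_from_ubuf(struct ubuf_info *ubuf)
{
	return container_of(ubuf, struct pending_tx_info, callback_struct);
}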

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)
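/* For example, GSO_BIT(TCPV4) expands to (1 << XEN_NETIF_GSO_TYPE_TCPV4);
 * bits of this form make up the gso_mask field of struct xenvif below.
 */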

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
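/* With a 4 KiB XEN_PAGE_SIZE this evaluates to 65536 / 4096 + 1 = 17:
 * sixteen pages cover a 64 KiB payload, and the extra slot allows for a
 * start offset that is not page aligned.
 */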

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum number of slots a valid packet can use. Currently this
 * value is defined as XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
 * supported by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	u64 rx_bytes;
	u64 rx_packets;
	u64 tx_bytes;
	u64 tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

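/* RX grant-copy batching: up to COPY_BATCH_SIZE gnttab_copy operations
 * are accumulated in op[], with the matching RX ring index recorded in
 * idx[], and then issued to Xen as a single batch so the hypercall cost
 * is amortised across many copies.
 */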
struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;
	struct sk_buff_head *completed;
};

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/*
	 * TX/RX common EOI handling.
	 * When feature-split-event-channels = 0, the interrupt handler sets
	 * NETBK_COMMON_EOI; otherwise NETBK_RX_EOI and NETBK_TX_EOI are set
	 * by the RX and TX interrupt handlers.
	 * The RX and TX handler threads will issue an EOI when either
	 * NETBK_COMMON_EOI or their specific bit (NETBK_RX_EOI or
	 * NETBK_TX_EOI) is set, and they will reset those bits. See the
	 * illustrative sketch after this struct.
	 */
	atomic_t eoi_pending;
#define NETBK_RX_EOI		0x01
#define NETBK_TX_EOI		0x02
#define NETBK_COMMON_EOI	0x04

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* This prevents zerocopy callbacks from racing over dealloc_ring */
	spinlock_t callback_lock;
	/* This prevents the dealloc thread and NAPI instance from racing
	 * over response creation and pending_ring in xenvif_idx_release. In
	 * xenvif_tx_err it only protects response creation.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	unsigned int rx_slots_needed;
	bool stalled;

	struct xenvif_copy_state rx_copy;

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;
	bool rate_limited;

	/* Statistics */
	struct xenvif_stats stats;
};
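
/*
 * Illustrative sketch (not part of the driver) of the EOI bookkeeping
 * described above: with split event channels, a TX interrupt handler
 * could record that an EOI is owed and kick NAPI roughly like this. The
 * real handlers also decide whether to EOI the event channel
 * immediately; this simplified example omits that.
 */
static inline irqreturn_t example_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	atomic_or(NETBK_TX_EOI, &queue->eoi_pending); /* EOI owed for TX */
	napi_schedule(&queue->napi); /* process guest TX in NAPI context */

	return IRQ_HANDLED;
}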

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	bool mapping_sel;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

struct backend_info {
	struct xenbus_device *dev;
	struct xenvif *vif;

	/* This is the state that will be reflected in xenstore when any
	 * active hotplug script completes.
	 */
	enum xenbus_state state;

	enum xenbus_state frontend_state;
	struct xenbus_watch hotplug_status_watch;
	u8 have_hotplug_status_watch:1;

	const char *hotplug_script;
};

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t domid;
	unsigned int handle;

	u8 fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* headroom requested by xen-netfront */
	u16 xdp_headroom;

	/* Is this interface disabled? True when the backend discovers the
	 * frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	struct backend_info *be;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
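
/*
 * Illustrative sketch (not part of the driver): skb->cb is per-layer
 * scratch space, so the RX path can stamp an expiry on a queued skb
 * like this. The real call sites live in the .c files.
 */
static inline void example_stamp_rx_expiry(struct xenvif *vif,
					   struct sk_buff *skb)
{
	XENVIF_RX_CB(skb)->expires = jiffies + vif->drain_timeout;
}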

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

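/* Number of TX requests currently in flight: pending_prod - pending_cons
 * counts the free entries in pending_ring, so subtracting it from
 * MAX_PENDING_REQS yields the outstanding requests.
 */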
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;
extern bool provides_xdp_headroom;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */