Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");
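/*
 * Usage sketch (assuming the module is named xen_netfront, which this
 * excerpt does not state): when built as a module the limit can be set
 * at load time with "modprobe xen_netfront max_queues=4"; when built in,
 * the equivalent boot parameter would be "xen_netfront.max_queues=4".
 */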

#define XENNET_TIMEOUT  (5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_wq);

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct bpf_prog __rcu *xdp_prog;

	struct napi_struct napi;

	/* Split event channel support: tx_* == rx_* when using a
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channel support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
	unsigned short tx_link[NET_TX_RING_SIZE];
#define TX_LINK_NONE 0xffff
#define TX_PENDING   0xfffe
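/*
 * tx_link[] doubles as a singly linked free list over slot ids:
 * TX_LINK_NONE terminates a chain, while TX_PENDING marks a slot whose
 * request has been queued but not yet published to the backend (see
 * xennet_mark_tx_pending() below).
 */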
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;
	unsigned int tx_pend_queue;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned int rx_rsp_unconsumed;
	spinlock_t rx_cons_lock;

	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is the device behaving sanely? */
	bool broken;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Helpers for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}

static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
	unsigned int id = *head;

	if (id != TX_LINK_NONE) {
		*head = list[id];
		list[id] = TX_LINK_NONE;
	}
	return id;
}

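/*
 * NET_RX_RING_SIZE is a power of two, so masking with (size - 1) is
 * equivalent to taking the ring index modulo the ring size.
 */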
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
					    RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


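/*
 * Armed by xennet_alloc_rx_buffers() when the rx ring could not be
 * sufficiently refilled; roughly 100 ms later it schedules NAPI so the
 * refill is retried.
 */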
static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
	napi_schedule(&queue->napi);
}

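/*
 * There is room for another skb only while the outstanding requests
 * leave more than XEN_NETIF_NR_SLOTS_MIN + 1 free tx slots, i.e.
 * headroom for a maximally fragmented skb.
 */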
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


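/*
 * Each rx buffer is a small linear skb (RX_COPY_THRESHOLD bytes of
 * headroom) with one page-pool page attached as frag 0; the pull_to
 * field in NETFRONT_SKB_CB() records how much of a received packet is
 * later pulled into the linear area (the receive path is not shown in
 * this excerpt).
 */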
static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = page_pool_dev_alloc_pages(queue->page_pool);
	if (unlikely(!page)) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align the IP header to a 16-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * "Enough requests" means the sum of newly created slots and the
	 * slots still unconsumed by the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

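/*
 * Bring the device up: enable NAPI on every queue, refill the rx rings,
 * and kick NAPI immediately if responses are already pending.
 */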
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	if (!np->queues || np->broken)
		return -ENODEV;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}

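/*
 * Reclaim tx slots whose responses the backend has published: validate
 * each response, end the foreign grant access, free the skb, and return
 * the slot id to the free list. Any inconsistency marks the device
 * broken and disables it.
 */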
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;
	bool work_done = false;
	const struct device *dev = &queue->info->netdev->dev;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
			dev_alert(dev, "Illegal number of responses %u\n",
				  prod - queue->tx.rsp_cons);
			goto err;
		}
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response txrsp;

			work_done = true;

			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
			if (txrsp.status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp.id;
			if (id >= RING_SIZE(&queue->tx)) {
				dev_alert(dev,
					  "Response has incorrect id (%u)\n",
					  id);
				goto err;
			}
			if (queue->tx_link[id] != TX_PENDING) {
				dev_alert(dev,
					  "Response for inactive request\n");
				goto err;
			}

			queue->tx_link[id] = TX_LINK_NONE;
			skb = queue->tx_skbs[id];
			queue->tx_skbs[id] = NULL;
			if (unlikely(!gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly))) {
				dev_alert(dev,
					  "Grant still in use by backend domain\n");
				goto err;
			}
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);

	return work_done;

 err:
	queue->info->broken = true;
	dev_alert(dev, "Disabled for further use\n");

	return work_done;
}

struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx;      /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request, local copy */
	unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id] = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	/*
	 * Put the request on the pending queue; it will be marked as
	 * pending when the producer index is about to be pushed.
	 */
	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

	info->tx = tx;
	info->size += info->tx_local.size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info->tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}

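/*
 * Walk [page + offset, len) one page at a time and emit a tx request
 * per grant-sized chunk; every request after the first is chained to
 * the previous one with XEN_NETTXF_more_data by xennet_make_one_txreq().
 */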
static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      info);

		page++;
		offset = 0;
		len -= info->size;
	}
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
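/*
 * Worked example (assuming 4 KiB grants): a 200-byte linear area that
 * starts 100 bytes before a page boundary spills into a second page and
 * therefore needs two slots, despite being only 200 bytes long.
 */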
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
	unsigned int i;

	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
	       TX_LINK_NONE)
		queue->tx_link[i] = TX_PENDING;
}

static int xennet_xdp_xmit_one(struct net_device *dev,
			       struct netfront_queue *queue,
			       struct xdp_frame *xdpf)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = NULL,
		.page = virt_to_page(xdpf->data),
	};
	int notify;

	xennet_make_first_txreq(&info,
				offset_in_page(xdpf->data),
				xdpf->len);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += xdpf->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	xennet_tx_buf_gc(queue);

	return 0;
}

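/*
 * Batched XDP transmit: frames are pinned to the queue chosen by the
 * current CPU, and the return value is the number of frames accepted
 * (n - drops), as the ndo_xdp_xmit contract expects.
 */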
static int xennet_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_queue *queue = NULL;
	unsigned long irq_flags;
	int drops = 0;
	int i, err;

	if (unlikely(np->broken))
		return -ENODEV;
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = &np->queues[smp_processor_id() % num_queues];

	spin_lock_irqsave(&queue->tx_lock, irq_flags);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (!xdpf)
			continue;
		err = xennet_xdp_xmit_one(dev, queue, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}
	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);

	return n - drops;
}


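/*
 * Worst-case slots for one 64 KiB GSO packet: one grant per
 * XEN_PAGE_SIZE chunk, plus one extra slot for a misaligned start.
 */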
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	if (unlikely(np->broken))
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for the wire format, drop the skb and
	 * alert the user about the misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first request must be at least ETH_HLEN bytes or the packet
	 * will be dropped by netback.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		nskb = skb_copy(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		if (!nskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		skb = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		page = virt_to_page(skb->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		xennet_make_txreqs(&info, skb_frag_page(frag),
					skb_frag_off(frag),
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

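/*
 * TX ring protocol, in brief: the code above fills request slots on the
 * shared ring, RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod
 * and reports whether the backend might be waiting, and only then is
 * the event channel kicked. That keeps the common case free of
 * inter-domain notifications. A minimal sketch of the same producer
 * pattern (illustrative names, not driver code):
 *
 *	RING_GET_REQUEST(&q->tx, q->tx.req_prod_pvt++)->gref = gref;
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&q->tx, notify);
 *	if (notify)
 *		notify_remote_via_irq(q->tx_irq);
 */
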
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}

static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	kfree(info->queues);
	info->queues = NULL;
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_destroy_queues(np);
}

static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	queue->rx.rsp_cons = val;
	queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}

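/*
 * rx.rsp_cons is only ever updated through this helper so the cached
 * rx_rsp_unconsumed count stays coherent under rx_cons_lock. The
 * interrupt path (xennet_handle_rx() below) compares that count with
 * the live ring state to spot a backend signalling events without
 * producing work, or a producer index that moved backwards.
 */
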
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

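/*
 * "Moving" a slot re-posts the skb and its grant at the current request
 * producer instead of freeing them, so a response that cannot be
 * consumed (bad offset, missing extras, ...) still leaves the RX ring
 * fully provisioned. Only req_prod_pvt is advanced here; the slot is
 * published to the backend by the next requests push, presumably from
 * the refill path in xennet_alloc_rx_buffers().
 */
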
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	xennet_set_rx_rsp_cons(queue, cons);
	return err;
}

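/*
 * Extra info entries share the response ring: each occupies one
 * xen_netif_rx_response slot, reinterpreted as xen_netif_extra_info,
 * and the decoded entries are indexed by type. Sketch of a consumer,
 * assuming a GSO extra was flagged:
 *
 *	struct xen_netif_extra_info *gso =
 *		&extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 *	if (gso->type)
 *		mss = gso->u.gso.size;
 *
 * XEN_NETIF_EXTRA_FLAG_MORE on an entry means another extra follows
 * in the next slot.
 */
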
static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
		   struct xen_netif_rx_response *rx, struct bpf_prog *prog,
		   struct xdp_buff *xdp, bool *need_xdp_flush)
{
	struct xdp_frame *xdpf;
	u32 len = rx->status;
	u32 act;
	int err;

	xdp->data_hard_start = page_address(pdata);
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_end = xdp->data + len;
	xdp->rxq = &queue->xdp_rxq;
	xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_TX:
		get_page(pdata);
		xdpf = xdp_convert_buff_to_frame(xdp);
		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
		if (unlikely(err < 0))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_REDIRECT:
		get_page(pdata);
		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
		*need_xdp_flush = true;
		if (unlikely(err))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_PASS:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(queue->info->netdev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

	return act;
}

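/*
 * Verdict handling follows the usual XDP contract: XDP_TX and
 * XDP_REDIRECT take an extra page reference because the frame leaves
 * this queue's control, XDP_PASS falls through to normal skb delivery,
 * and XDP_DROP lets the caller discard the buffer. A trivial program
 * exercising the drop path (sketch, built separately with
 * clang -target bpf, not part of this file):
 *
 *	SEC("xdp")
 *	int drop_all(struct xdp_md *ctx)
 *	{
 *		return XDP_DROP;
 *	}
 */
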
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		if (!gnttab_end_foreign_access_ref(ref, 0)) {
			dev_alert(dev,
				  "Grant still in use by backend domain\n");
			queue->info->broken = true;
			dev_alert(dev, "Disabled for further use\n");
			return -EINVAL;
		}

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
							 rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
			} else {
				/* drop the frame */
				err = -EINVAL;
			}
		}
		rcu_read_unlock();
next:
		__skb_queue_tail(list, skb);
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
		rx = &rx_local;
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		xennet_set_rx_rsp_cons(queue, cons + slots);

	return err;
}

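/*
 * Slot accounting above: "slots" counts how many ring entries one
 * packet consumed (the head plus XEN_NETRXF_more_data continuations),
 * while "max" is the largest chain this frontend will accept:
 * XEN_NETIF_NR_SLOTS_MIN, plus one extra slot apparently tolerated for
 * small first responses (status <= RX_COPY_THRESHOLD). An oversized
 * chain is not trusted and is failed with -E2BIG so xennet_poll()
 * moves the whole packet to the error queue.
 */
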
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

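/*
 * SKB_GSO_DODGY marks the GSO metadata as coming from an untrusted
 * source (the other domain): the first segmentation point in the stack
 * must then re-validate the headers and compute gso_segs, which is why
 * gso_segs is deliberately left at 0 here.
 */
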
static int xennet_fill_frags(struct netfront_queue *queue,
			     struct sk_buff *skb,
			     struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response rx;
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);

		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to < skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
			xennet_set_rx_rsp_cons(queue,
					       ++cons + skb_queue_len(list));
			kfree_skb(nskb);
			return -ENOENT;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx.offset, rx.status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	xennet_set_rx_rsp_cons(queue, cons);

	return 0;
}

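/*
 * Each nskb queued on "list" carries exactly one page of payload in
 * frags[0]. The loop above steals that page into the head skb with
 * skb_add_rx_frag() and frees the now-empty carrier skb; zeroing
 * nr_frags on nskb first keeps kfree_skb() from releasing the page
 * reference the head skb just inherited.
 */
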
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

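/*
 * skb_checksum_setup() derives csum_start/csum_offset for a
 * CHECKSUM_PARTIAL skb by parsing the IP and TCP/UDP headers; when
 * recalculate_partial_csum is set it also rewrites the pseudo-header
 * checksum. That combination is what repairs GSO frames from peers
 * that failed to set NETRXF_csum_blank.
 */
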
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}

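/*
 * Packets are handed up through napi_gro_receive() rather than
 * netif_rx() so that consecutive segments of the same TCP flow can be
 * coalesced before entering the stack, which can noticeably cut
 * per-packet overhead on a paravirtualised receive path.
 */
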
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;
	bool need_xdp_flush = false;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
		dev_alert(&dev->dev, "Illegal number of responses %u\n",
			  rp - queue->rx.rsp_cons);
		queue->info->broken = true;
		spin_unlock(&queue->rx_lock);
		return 0;
	}
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		RING_COPY_RESPONSE(&queue->rx, i, rx);
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
					   &need_xdp_flush);

		if (unlikely(err)) {
			if (queue->info->broken) {
				spin_unlock(&queue->rx_lock);
				return 0;
			}
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				xennet_set_rx_rsp_cons(queue,
						       queue->rx.rsp_cons +
						       skb_queue_len(&tmpq));
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
			goto err;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		i = queue->rx.rsp_cons + 1;
		xennet_set_rx_rsp_cons(queue, i);
		work_done++;
	}
	if (need_xdp_flush)
		xdp_do_flush();

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}

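/*
 * xennet_poll() follows the standard NAPI contract: consume at most
 * "budget" packets, and only when less than the budget was used call
 * napi_complete_done(). The RING_FINAL_CHECK_FOR_RESPONSES() that
 * follows closes the race with a backend that produced more responses
 * just as polling stopped; if anything slipped in, the queue is
 * rescheduled instead of waiting for another interrupt.
 */
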
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}

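/*
 * The per-cpu counters use a u64_stats seqcount instead of a lock:
 * writers on the TX/RX fast paths bracket updates with
 * u64_stats_update_begin()/end(), and the reader loops above retry
 * until they see a stable snapshot, e.g.:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->syncp);
 *		packets = stats->packets;
 *	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 *
 * On 64-bit kernels the seqcount side compiles away and these are
 * plain loads; the retry only costs anything where a 64-bit counter
 * cannot be read atomically.
 */
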
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (!queue->tx_skbs[i])
			continue;

		skb = queue->tx_skbs[i];
		queue->tx_skbs[i] = NULL;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}

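/*
 * Teardown rule used by both helpers above: take an extra page
 * reference before gnttab_end_foreign_access(), because revoking a
 * grant the backend may still hold can be deferred, in which case the
 * grant layer keeps the page alive until the foreign mapping is truly
 * gone. Freeing our skb afterwards is then safe either way.
 */
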
static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}

static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload\n");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

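/*
 * Offload negotiation is xenstore-driven: the backend advertises each
 * capability as a "feature-*" key in its device directory, and
 * xennet_fix_features() reads the keys back with
 * xenbus_read_unsigned(), defaulting to off. From the control domain
 * the advertised values can be inspected with something like
 *
 *	xenstore-ls /local/domain/0/backend/vif/<domid>/<handle>
 *
 * (illustrative path; the exact layout depends on the toolstack).
 */
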
static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->tx_lock, flags);
	if (xennet_tx_buf_gc(queue))
		*eoi = 0;
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return true;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned int work_queued;
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
	if (work_queued > queue->rx_rsp_unconsumed) {
		queue->rx_rsp_unconsumed = work_queued;
		*eoi = 0;
	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
		const struct device *dev = &queue->info->netdev->dev;

		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
		dev_alert(dev, "RX producer index going backwards\n");
		dev_alert(dev, "Disabled for further use\n");
		queue->info->broken = true;
		return false;
	}
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);

	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
		napi_schedule(&queue->napi);

	return true;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (xennet_handle_tx(dev_id, &eoiflag) &&
	    xennet_handle_rx(dev_id, &eoiflag))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

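/*
 * All three handlers ack through xen_irq_lateeoi() and keep
 * XEN_EOI_FLAG_SPURIOUS set unless the event produced real work (the
 * handlers clear *eoi in that case). Reporting spurious events lets
 * the event-channel core throttle a misbehaving backend that raises
 * interrupts without queueing responses; together with the
 * rx_rsp_unconsumed bookkeeping this appears to be the event-storm
 * hardening (XSA-391 era) carried in this tree.
 */
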
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static void xennet_poll_controller(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	/* Poll each queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct netfront_info *info = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	unsigned int num_queues = dev->real_num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (info->broken)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	for (i = 0; i < num_queues; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		xennet_interrupt(0, &info->queues[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) #define NETBACK_XDP_HEADROOM_DISABLE	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) #define NETBACK_XDP_HEADROOM_ENABLE	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	unsigned short headroom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 			    "xdp-headroom", "%hu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 			    headroom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		pr_warn("Error writing xdp-headroom\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
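/*
 * Illustration (not part of the driver): assuming a frontend nodename of
 * device/vif/0, enabling XDP results in a XenStore write such as
 *
 *   device/vif/0/xdp-headroom = "256"
 *
 * and disabling it writes "0".  The exact path depends on the frontend's
 * nodename; the value shown assumes the usual 256-byte XDP_PACKET_HEADROOM.
 */
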
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	struct netfront_info *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	struct bpf_prog *old_prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	unsigned int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (dev->mtu > max_mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (!np->netback_has_xdp_headroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 				  NETBACK_XDP_HEADROOM_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	/* avoid the race with XDP headroom adjustment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	wait_event(module_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		   xenbus_read_driver_state(np->xbdev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		   XenbusStateReconfigured);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	np->netfront_xdp_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	old_prog = rtnl_dereference(np->queues[0].xdp_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	if (prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		bpf_prog_add(prog, dev->real_num_tx_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	for (i = 0; i < dev->real_num_tx_queues; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		rcu_assign_pointer(np->queues[i].xdp_prog, prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if (old_prog)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		for (i = 0; i < dev->real_num_tx_queues; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			bpf_prog_put(old_prog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	xenbus_switch_state(np->xbdev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
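/*
 * Summary of the reconfiguration dance above: the frontend moves to
 * XenbusStateReconfiguring, asks the backend for headroom via
 * talk_to_netback_xdp(), waits until the backend reports
 * XenbusStateReconfigured, and only then swaps the per-queue program
 * pointers.  bpf_prog_add() takes one reference per TX queue before the
 * swap and bpf_prog_put() drops one per queue for the old program
 * afterwards, keeping the refcount balanced across all queues.
 */
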
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct netfront_info *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	if (np->broken)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	switch (xdp->command) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	case XDP_SETUP_PROG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static const struct net_device_ops xennet_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	.ndo_uninit          = xennet_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	.ndo_open            = xennet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	.ndo_stop            = xennet_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	.ndo_start_xmit      = xennet_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	.ndo_change_mtu	     = xennet_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	.ndo_get_stats64     = xennet_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	.ndo_set_mac_address = eth_mac_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	.ndo_validate_addr   = eth_validate_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	.ndo_fix_features    = xennet_fix_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	.ndo_set_features    = xennet_set_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	.ndo_select_queue    = xennet_select_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	.ndo_bpf             = xennet_xdp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	.ndo_xdp_xmit        = xennet_xdp_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) #ifdef CONFIG_NET_POLL_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	.ndo_poll_controller = xennet_poll_controller,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static void xennet_free_netdev(struct net_device *netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	struct netfront_info *np = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	free_percpu(np->rx_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	free_percpu(np->tx_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static struct net_device *xennet_create_dev(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	struct netfront_info *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	if (!netdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	np                   = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	np->xbdev            = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	np->queues = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	if (np->rx_stats == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	if (np->tx_stats == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	netdev->netdev_ops	= &xennet_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 				  NETIF_F_GSO_ROBUST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	netdev->hw_features	= NETIF_F_SG |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 				  NETIF_F_IPV6_CSUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 				  NETIF_F_TSO | NETIF_F_TSO6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	 * Assume that all hw features are available for now. This set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	 * will be adjusted by the call to netdev_update_features() in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	 * xennet_connect() which is the earliest point where we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	 * negotiate with the backend regarding supported features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	netdev->features |= netdev->hw_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	netdev->ethtool_ops = &xennet_ethtool_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	netdev->min_mtu = ETH_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	SET_NETDEV_DEV(netdev, &dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	np->netdev = netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	np->netfront_xdp_enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	netif_carrier_off(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
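	/* Wait (retrying every XENNET_TIMEOUT) until the backend has left
	 * the Closed/Unknown states, so a new frontend instance does not
	 * finish probing while a previous backend is still tearing down.
	 */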
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		xenbus_switch_state(dev, XenbusStateInitialising);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		err = wait_event_timeout(module_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 				 xenbus_read_driver_state(dev->otherend) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 				 XenbusStateClosed &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 				 xenbus_read_driver_state(dev->otherend) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 				 XenbusStateUnknown, XENNET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	} while (!err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	xennet_free_netdev(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)  * Entry point to this code when a new device is created.  Allocate the basic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  * structures and the ring buffers for communication with the backend, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  * inform the backend of the appropriate details for those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) static int netfront_probe(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 			  const struct xenbus_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	struct net_device *netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	struct netfront_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	netdev = xennet_create_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (IS_ERR(netdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		err = PTR_ERR(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		xenbus_dev_fatal(dev, err, "creating netdev");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	info = netdev_priv(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	dev_set_drvdata(&dev->dev, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static void xennet_end_access(int ref, void *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	/* This frees the page as a side-effect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	if (ref != GRANT_INVALID_REF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) static void xennet_disconnect_backend(struct netfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	unsigned int num_queues = info->netdev->real_num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	netif_carrier_off(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	for (i = 0; i < num_queues && info->queues; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		struct netfront_queue *queue = &info->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		del_timer_sync(&queue->rx_refill_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			unbind_from_irqhandler(queue->tx_irq, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			unbind_from_irqhandler(queue->tx_irq, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			unbind_from_irqhandler(queue->rx_irq, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		queue->tx_evtchn = queue->rx_evtchn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		queue->tx_irq = queue->rx_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		if (netif_running(info->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			napi_synchronize(&queue->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		xennet_release_tx_bufs(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		xennet_release_rx_bufs(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		gnttab_free_grant_references(queue->gref_tx_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		gnttab_free_grant_references(queue->gref_rx_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		/* End access and free the pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		queue->tx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		queue->rx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		queue->tx.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		queue->rx.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		page_pool_destroy(queue->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
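/*
 * Teardown ordering in xennet_disconnect_backend() above matters: the
 * carrier goes down first so no new transmits are queued, timers and IRQs
 * are quiesced before NAPI is synchronized, and only then are the buffers
 * released and the ring grants revoked.  Revoking a grant while the
 * backend could still access the page would be unsafe.
 */
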
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * We are reconnecting to the backend, due to a suspend/resume, or a backend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  * driver restart.  We tear down our netif structure and recreate it, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  * leave the device-layer structures intact so that this is transparent to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  * rest of the kernel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) static int netfront_resume(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	netif_tx_lock_bh(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	netif_device_detach(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	netif_tx_unlock_bh(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	xennet_disconnect_backend(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	char *s, *e, *macstr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	if (IS_ERR(macstr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		return PTR_ERR(macstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	for (i = 0; i < ETH_ALEN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		mac[i] = simple_strtoul(s, &e, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			kfree(macstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 		s = e+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	kfree(macstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
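/*
 * Example (illustrative only): the "mac" node holds a string such as
 * "00:16:3e:4a:b1:02".  Each colon-separated token is parsed as a hex
 * byte; anything that does not form exactly six such tokens makes
 * xen_net_read_mac() return -ENOENT.
 */
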
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static int setup_netfront_single(struct netfront_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 						xennet_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 						queue->info->netdev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 						queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		goto bind_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	queue->rx_evtchn = queue->tx_evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	queue->rx_irq = queue->tx_irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) bind_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	queue->tx_evtchn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) static int setup_netfront_split(struct netfront_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		goto alloc_rx_evtchn_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		 "%s-tx", queue->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 						xennet_tx_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 						queue->tx_irq_name, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		goto bind_tx_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	queue->tx_irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		 "%s-rx", queue->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 						xennet_rx_interrupt, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 						queue->rx_irq_name, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		goto bind_rx_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	queue->rx_irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) bind_rx_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	unbind_from_irqhandler(queue->tx_irq, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	queue->tx_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) bind_tx_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	queue->rx_evtchn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) alloc_rx_evtchn_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	queue->tx_evtchn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
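/*
 * With split event channels (above), TX and RX completions arrive on
 * separate interrupts, so each direction can be handled and EOI'd
 * independently; the single-channel variant multiplexes both directions
 * over one interrupt via xennet_interrupt().
 */
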
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int setup_netfront(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	struct xen_netif_tx_sring *txs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct xen_netif_rx_sring *rxs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	grant_ref_t gref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	queue->tx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	queue->rx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	queue->rx.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	queue->tx.sring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	if (!txs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	SHARED_RING_INIT(txs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	err = xenbus_grant_ring(dev, txs, 1, &gref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	queue->tx_ring_ref = gref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	if (!rxs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	SHARED_RING_INIT(rxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	err = xenbus_grant_ring(dev, rxs, 1, &gref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	queue->rx_ring_ref = gref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	if (feature_split_evtchn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		err = setup_netfront_split(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	/* Set up a single event channel if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	 *  a) feature-split-event-channels == 0, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	 *  b) feature-split-event-channels == 1 but the split setup failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	if (!feature_split_evtchn || err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		err = setup_netfront_single(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	/* If we fail to set up netfront, it is safe to just revoke access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	 * the granted pages, because the backend is not accessing them yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	if (queue->rx_ring_ref != GRANT_INVALID_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		gnttab_end_foreign_access(queue->rx_ring_ref, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 					  (unsigned long)rxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		queue->rx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		free_page((unsigned long)rxs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	if (queue->tx_ring_ref != GRANT_INVALID_REF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		gnttab_end_foreign_access(queue->tx_ring_ref, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 					  (unsigned long)txs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		queue->tx_ring_ref = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		free_page((unsigned long)txs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
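/*
 * Ring geometry note: each direction in setup_netfront() above uses one
 * zeroed XEN_PAGE_SIZE shared page.  SHARED_RING_INIT() initialises the
 * producer/consumer indices in the shared page, FRONT_RING_INIT() sets up
 * the frontend's private view of it, and xenbus_grant_ring() hands the
 * backend a grant reference to that single page.
 */
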
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /* Queue-specific initialisation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * This used to be done in xennet_create_dev() but must now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  * be run per-queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static int xennet_init_queue(struct netfront_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	unsigned short i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	char *devid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	spin_lock_init(&queue->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	spin_lock_init(&queue->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	spin_lock_init(&queue->rx_cons_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		 devid, queue->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	/* Initialise tx_skb_freelist as a free chain containing every entry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	queue->tx_skb_freelist = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	queue->tx_pend_queue = TX_LINK_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		queue->tx_link[i] = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		queue->grant_tx_page[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	/* Clear out rx_skbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		queue->rx_skbs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	/* A grant for every tx ring slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 					  &queue->gref_tx_head) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		pr_alert("can't alloc tx grant refs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	/* A grant for every rx ring slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 					  &queue->gref_rx_head) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		pr_alert("can't alloc rx grant refs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		goto exit_free_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)  exit_free_tx:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	gnttab_free_grant_references(queue->gref_tx_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)  exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
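/*
 * The TX freelist built in xennet_init_queue() above is a singly linked
 * list threaded through tx_link[]; illustratively, for a 4-entry ring:
 *
 *   tx_skb_freelist = 0
 *   tx_link[] = { 1, 2, 3, TX_LINK_NONE }
 *
 * Slots are popped from the head on transmit and pushed back on
 * completion, with TX_LINK_NONE terminating the chain.
 */
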
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static int write_queue_xenstore_keys(struct netfront_queue *queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			   struct xenbus_transaction *xbt, int write_hierarchical)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	/* Write the queue-specific keys into XenStore in the traditional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	 * way for a single queue, or under per-queue subkeys for multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	 * queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	struct xenbus_device *dev = queue->info->xbdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	const char *message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	char *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	size_t pathsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	/* Choose the correct place to write the keys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	if (write_hierarchical) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		pathsize = strlen(dev->nodename) + 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		path = kzalloc(pathsize, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			message = "out of memory while writing ring references";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		snprintf(path, pathsize, "%s/queue-%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 				dev->nodename, queue->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		path = (char *)dev->nodename;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	/* Write ring references */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			queue->tx_ring_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		message = "writing tx-ring-ref";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			queue->rx_ring_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		message = "writing rx-ring-ref";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	/* Write event channels, taking into account both shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	 * and split event channel scenarios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	if (queue->tx_evtchn == queue->rx_evtchn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		/* Shared event channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		err = xenbus_printf(*xbt, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 				"event-channel", "%u", queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			message = "writing event-channel";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		/* Split event channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		err = xenbus_printf(*xbt, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 				"event-channel-tx", "%u", queue->tx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			message = "writing event-channel-tx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		err = xenbus_printf(*xbt, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 				"event-channel-rx", "%u", queue->rx_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 			message = "writing event-channel-rx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	if (write_hierarchical)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	if (write_hierarchical)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		kfree(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	xenbus_dev_fatal(dev, err, "%s", message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 
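/*
 * Resulting XenStore layout (illustrative paths only):
 *
 *   device/vif/0/tx-ring-ref              = "<ref>"   (flat, one queue)
 *   device/vif/0/event-channel            = "<port>"
 *
 *   device/vif/0/queue-0/tx-ring-ref      = "<ref>"   (hierarchical)
 *   device/vif/0/queue-0/event-channel-tx = "<port>"
 *
 * The "queue-%u" subkey form is only used when more than one queue is
 * negotiated, matching the write_hierarchical argument above.
 */
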
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) static int xennet_create_page_pool(struct netfront_queue *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct page_pool_params pp_params = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 		.order = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		.flags = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		.pool_size = NET_RX_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		.nid = NUMA_NO_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 		.dev = &queue->info->netdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 		.offset = XDP_PACKET_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	queue->page_pool = page_pool_create(&pp_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	if (IS_ERR(queue->page_pool)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		err = PTR_ERR(queue->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		queue->page_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 			       queue->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		goto err_free_pp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 					 MEM_TYPE_PAGE_POOL, queue->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		goto err_unregister_rxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) err_unregister_rxq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	xdp_rxq_info_unreg(&queue->xdp_rxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) err_free_pp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	page_pool_destroy(queue->page_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	queue->page_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
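/*
 * Registration order above is load-bearing: the page pool must exist
 * before xdp_rxq_info_reg_mem_model() can point the RX queue's memory
 * model at it, and the error path unwinds in the opposite order
 * (unregister the rxq info, then destroy the pool).
 */
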
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) static int xennet_create_queues(struct netfront_info *info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 				unsigned int *num_queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 			       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	if (!info->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	for (i = 0; i < *num_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		struct netfront_queue *queue = &info->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		queue->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		queue->info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		ret = xennet_init_queue(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			dev_warn(&info->xbdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 				 "only created %u queues\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			*num_queues = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		/* use page pool recycling instead of buddy allocator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		ret = xennet_create_page_pool(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 			*num_queues = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		netif_napi_add(queue->info->netdev, &queue->napi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 			       xennet_poll, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		if (netif_running(info->netdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 			napi_enable(&queue->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	if (*num_queues == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		dev_err(&info->xbdev->dev, "no queues\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
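/*
 * xennet_create_queues() above tolerates partial success: if
 * xennet_init_queue() fails for queue i, *num_queues is clamped to the
 * number actually created and the device carries on with fewer queues.
 * A page-pool allocation failure, by contrast, is fatal, and only zero
 * usable queues is reported as -EINVAL.
 */
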
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) /* Common code used when first setting up, and when resuming. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) static int talk_to_netback(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			   struct netfront_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	const char *message;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	struct xenbus_transaction xbt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	unsigned int feature_split_evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	unsigned int max_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 	struct netfront_queue *queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	unsigned int num_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	info->netdev->irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	/* Check if backend supports multiple queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 					  "multi-queue-max-queues", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	num_queues = min(max_queues, xennet_max_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	/* Check feature-split-event-channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 					"feature-split-event-channels", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	/* Read mac addr. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		goto out_unlocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 							      "feature-xdp-headroom", 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	if (info->netback_has_xdp_headroom) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		/* set the current xen-netfront xdp state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 					  NETBACK_XDP_HEADROOM_ENABLE :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 					  NETBACK_XDP_HEADROOM_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			goto out_unlocked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	if (info->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		xennet_destroy_queues(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	/* In the case of a reconnect, reset the "broken" indicator. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	info->broken = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	err = xennet_create_queues(info, &num_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		xenbus_dev_fatal(dev, err, "creating queues");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		kfree(info->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 		info->queues = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	/* Create shared ring, alloc event channel -- for each queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	for (i = 0; i < num_queues; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		queue = &info->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		err = setup_netfront(dev, queue, feature_split_evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 			goto destroy_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	err = xenbus_transaction_start(&xbt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		xenbus_dev_fatal(dev, err, "starting transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		goto destroy_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (xenbus_exists(XBT_NIL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			  info->xbdev->otherend, "multi-queue-max-queues")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		/* Write the number of queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		err = xenbus_printf(xbt, dev->nodename,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 				    "multi-queue-num-queues", "%u", num_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 			message = "writing multi-queue-num-queues";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 			goto abort_transaction_no_dev_fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	if (num_queues == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 			goto abort_transaction_no_dev_fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 		/* Write the keys for each queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		for (i = 0; i < num_queues; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 			queue = &info->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 				goto abort_transaction_no_dev_fatal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	/* The remaining keys are not queue-specific */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 			    1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		message = "writing request-rx-copy";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		message = "writing feature-rx-notify";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		message = "writing feature-sg";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 		message = "writing feature-gso-tcpv4";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		message = "writing feature-gso-tcpv6";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 			   "1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		message = "writing feature-ipv6-csum-offload";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		goto abort_transaction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	err = xenbus_transaction_end(xbt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		if (err == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 			goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		xenbus_dev_fatal(dev, err, "completing transaction");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		goto destroy_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)  abort_transaction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	xenbus_dev_fatal(dev, err, "%s", message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) abort_transaction_no_dev_fatal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	xenbus_transaction_end(xbt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  destroy_ring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	xennet_disconnect_backend(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	xennet_destroy_queues(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) out_unlocked:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	device_unregister(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
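
/*
 * On success, the transaction above leaves the frontend's xenstore
 * directory populated roughly as follows (values illustrative):
 *
 *   <nodename>/request-rx-copy           = "1"
 *   <nodename>/feature-rx-notify         = "1"
 *   <nodename>/feature-sg                = "1"
 *   <nodename>/feature-gso-tcpv4         = "1"
 *   <nodename>/feature-gso-tcpv6         = "1"
 *   <nodename>/feature-ipv6-csum-offload = "1"
 *
 * plus "multi-queue-num-queues" when the backend advertises multi-queue
 * support, and the per-queue ring references and event channels written
 * by write_queue_xenstore_keys().
 */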
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 
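/*
 * Bring the device up against a (re)connected backend: check that the
 * backend supports the copying receive path, renegotiate all parameters
 * via talk_to_netback(), register the net device on first connect, and
 * finally kick every queue, since packets may have been requeued while
 * the connection was down.
 */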
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) static int xennet_connect(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	struct netfront_info *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	unsigned int num_queues = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	unsigned int j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	struct netfront_queue *queue = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 		dev_info(&dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 			 "backend does not support copying receive path\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	err = talk_to_netback(np->xbdev, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	if (np->netback_has_xdp_headroom)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 		pr_info("backend supports XDP headroom\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	/* talk_to_netback() sets the correct number of queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	num_queues = dev->real_num_tx_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	if (dev->reg_state == NETREG_UNINITIALIZED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		err = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 			pr_warn("%s: register_netdev err=%d\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 			device_unregister(&np->xbdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	netdev_update_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	 * All public and private state should now be sane.  Get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	 * ready to start sending and receiving packets and give the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	 * domain a kick because we've probably just requeued some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	 * packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	netif_tx_lock_bh(np->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	netif_device_attach(np->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	netif_tx_unlock_bh(np->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	netif_carrier_on(np->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	for (j = 0; j < num_queues; ++j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		queue = &np->queues[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		notify_remote_via_irq(queue->tx_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		if (queue->tx_irq != queue->rx_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			notify_remote_via_irq(queue->rx_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		spin_lock_irq(&queue->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		xennet_tx_buf_gc(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		spin_unlock_irq(&queue->tx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		spin_lock_bh(&queue->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		xennet_alloc_rx_buffers(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		spin_unlock_bh(&queue->rx_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)  * Callback received when the backend's xenbus state changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) static void netback_changed(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			    enum xenbus_state backend_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 	struct netfront_info *np = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 	struct net_device *netdev = np->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	wake_up_all(&module_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	switch (backend_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	case XenbusStateInitialising:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	case XenbusStateInitialised:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	case XenbusStateReconfiguring:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	case XenbusStateReconfigured:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	case XenbusStateUnknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		if (dev->state != XenbusStateInitialising)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 		if (xennet_connect(netdev) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 		xenbus_switch_state(dev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		netdev_notify_peers(netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 		if (dev->state == XenbusStateClosed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		fallthrough;	/* Missed the backend's CLOSING state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 	case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		xenbus_frontend_closed(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) static const struct xennet_stat {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	char name[ETH_GSTRING_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	u16 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) } xennet_stats[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		"rx_gso_checksum_fixup",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		offsetof(struct netfront_info, rx_gso_checksum_fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) static int xennet_get_sset_count(struct net_device *dev, int string_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	switch (string_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		return ARRAY_SIZE(xennet_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
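/*
 * Copy the driver-private counters out for ethtool. Each entry's offset
 * locates an atomic_t inside struct netfront_info, which is why
 * netdev_priv() is used as a byte pointer here.
 */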
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) static void xennet_get_ethtool_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 				     struct ethtool_stats *stats, u64 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 	void *np = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 	switch (stringset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 	case ETH_SS_STATS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 			memcpy(data + i * ETH_GSTRING_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 			       xennet_stats[i].name, ETH_GSTRING_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) static const struct ethtool_ops xennet_ethtool_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	.get_link = ethtool_op_get_link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	.get_sset_count = xennet_get_sset_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 	.get_ethtool_stats = xennet_get_ethtool_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 	.get_strings = xennet_get_strings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 	.get_ts_info = ethtool_op_get_ts_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) };
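
/*
 * Inside the guest, the counter above is exposed through the standard
 * ethtool statistics interface, e.g. (output illustrative):
 *
 *   $ ethtool -S eth0
 *   NIC statistics:
 *        rx_gso_checksum_fixup: 0
 */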
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) #ifdef CONFIG_SYSFS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static ssize_t show_rxbuf(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 			  struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
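/*
 * The receive ring size is fixed at NET_RX_RING_SIZE, so the value written
 * here is parsed only for syntax and otherwise discarded; the attribute
 * stays writable for compatibility with older tooling.
 */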
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) static ssize_t store_rxbuf(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			   struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			   const char *buf, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	char *endp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	unsigned long target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	if (!capable(CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	target = simple_strtoul(buf, &endp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	if (endp == buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 		return -EBADMSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	/* rxbuf_min and rxbuf_max are no longer configurable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static struct attribute *xennet_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	&dev_attr_rxbuf_min.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 	&dev_attr_rxbuf_max.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	&dev_attr_rxbuf_cur.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) static const struct attribute_group xennet_dev_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	.attrs = xennet_dev_attrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) #endif /* CONFIG_SYSFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
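/*
 * Walk the frontend through Closing and then Closed before tearing the
 * device down. wait_event_timeout() returns 0 on timeout, so each loop
 * keeps re-asserting the state until the backend reaches a terminal
 * state (Closing/Closed) or disappears (Unknown).
 */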
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static void xennet_bus_close(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		xenbus_switch_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		ret = wait_event_timeout(module_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 				   xenbus_read_driver_state(dev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 				   XenbusStateClosing ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 				   xenbus_read_driver_state(dev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 				   XenbusStateClosed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 				   xenbus_read_driver_state(dev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 				   XenbusStateUnknown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 				   XENNET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	} while (!ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 		xenbus_switch_state(dev, XenbusStateClosed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		ret = wait_event_timeout(module_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 				   xenbus_read_driver_state(dev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 				   XenbusStateClosed ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 				   xenbus_read_driver_state(dev->otherend) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 				   XenbusStateUnknown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 				   XENNET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	} while (!ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 
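/*
 * Teardown mirrors probe in reverse: finish the xenbus close handshake,
 * detach from the backend, unregister the net device if registration
 * happened, then destroy the queues under the rtnl lock and free the
 * netdev itself.
 */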
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) static int xennet_remove(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	xennet_bus_close(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	xennet_disconnect_backend(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	if (info->netdev->reg_state == NETREG_REGISTERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 		unregister_netdev(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	if (info->queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 		rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		xennet_destroy_queues(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	xennet_free_netdev(info->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) static const struct xenbus_device_id netfront_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	{ "vif" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	{ "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static struct xenbus_driver netfront_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	.ids = netfront_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	.probe = netfront_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	.remove = xennet_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	.resume = netfront_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	.otherend_changed = netback_changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) static int __init netif_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	if (!xen_has_pv_nic_devices())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	pr_info("Initialising Xen virtual ethernet driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	/* Allow as many queues as there are CPUs (but at most 8) if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	 * user has not specified a value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	if (xennet_max_queues == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 					  num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	return xenbus_register_frontend(&netfront_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) module_init(netif_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) static void __exit netif_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	xenbus_unregister_driver(&netfront_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) module_exit(netif_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) MODULE_DESCRIPTION("Xen virtual network device frontend");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) MODULE_ALIAS("xen:vif");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) MODULE_ALIAS("xennet");
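
/*
 * A hypothetical invocation from a Xen PV/PVH guest, assuming the
 * "max_queues" module parameter defined earlier in this file:
 *
 *   $ modprobe xen-netfront max_queues=4
 *
 * When max_queues is left at 0, netif_init() caps the queue count at
 * min(MAX_QUEUES_DEFAULT, num_online_cpus()).
 */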