/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT 64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increase the inflight counter. The inflight counter must be
 * increased because the core network code calls into
 * xenvif_zerocopy_callback, which in turn calls
 * xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

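/* Called from the zerocopy callback once the frontend pages backing an skb
 * are no longer referenced by this queue; pairs with
 * xenvif_skb_zerocopy_prepare().
 */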
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}

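/* A vif is schedulable only while it is running, connected to its frontend
 * and has not been disabled (e.g. after being flagged as rogue).
 */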
int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

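/* Schedule NAPI processing if the frontend has posted new TX requests.
 * Returns true if there was work to pick up.
 */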
static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

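/* TX event-channel handler, used when the frontend negotiated split event
 * channels. The EOI is kept pending until the event has been processed; a
 * spurious event is acknowledged (and flagged as such) immediately.
 */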
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so we pretend there is nothing to do
	 * for it in order to deschedule it from NAPI. The interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

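/* Wake the guest-RX kthread if there is RX work pending. Returns true if
 * the thread was kicked.
 */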
static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

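/* Combined handler, used when the frontend drives both the TX and RX rings
 * through a single event channel.
 */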
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

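/* Pick the transmit queue for a packet. If the frontend has configured a
 * hash algorithm, use the hash-to-queue mapping it supplied; otherwise fall
 * back to the core's default queue selection.
 */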
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0 as the
	 * packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
				dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

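/* Queue a packet from the local stack for delivery to the frontend. The
 * packet is handed to the per-queue RX kthread; if the frontend does not
 * consume it within the drain timeout it may be dropped.
 */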
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

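/* Enable NAPI and the event-channel interrupts on every queue. Called when
 * the interface is both connected and administratively up.
 */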
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

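/* Quiesce all queues: mask the event-channel interrupts, stop NAPI and
 * cancel any pending credit timer.
 */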
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

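/* Mask out offload features that the frontend has not negotiated. */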
static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

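/* One-time initialisation of a queue's software state: the credit
 * scheduler, the pending-request ring, the zerocopy callback slots and the
 * pages used to map frontend grants.
 */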
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so it
	 * is better to have it enabled. The long-term solution would be to
	 * use a set of valid page descriptors, without depending on
	 * ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

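/* Announce carrier once the frontend has connected. If the frontend cannot
 * handle scatter-gather, clamp the MTU to a single Ethernet frame first.
 */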
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

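/* Map the control ring shared with the frontend, sanity-check its indexes
 * and bind the associated event channel.
 */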
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

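/* Tear down a queue's kthreads, NAPI instance, event-channel interrupts and
 * shared ring mappings. Each step is guarded, so this is also safe to call
 * on a partially set-up queue from the connect error path.
 */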
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}