/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the number of ring page slots needed for the first SKB queued.
 * Note that any caller outside the RX thread must wake the RX thread
 * via xenvif_kick_thread() afterwards in order to avoid a race with
 * putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
                                       const struct sk_buff *skb)
{
        unsigned int needed = 0;

        if (skb) {
                needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
                if (skb_is_gso(skb))
                        needed++;
                if (skb->sw_hash)
                        needed++;
        }

        WRITE_ONCE(queue->rx_slots_needed, needed);
}

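/*
 * Check whether the frontend has posted enough rx ring slots for the
 * first queued SKB. If not, request an event at req_prod + 1 and
 * re-check, so requests posted concurrently by the frontend are not
 * missed.
 */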
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
        RING_IDX prod, cons;
        unsigned int needed;

        needed = READ_ONCE(queue->rx_slots_needed);
        if (!needed)
                return false;

        do {
                prod = queue->rx.sring->req_prod;
                cons = queue->rx.req_cons;

                if (prod - cons >= needed)
                        return true;

                queue->rx.sring->req_event = prod + 1;

                /* Make sure event is visible before we check prod
                 * again.
                 */
                mb();
        } while (queue->rx.sring->req_prod != prod);

        return false;
}

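/*
 * Queue an SKB for transmission to the frontend. If the internal rx
 * queue is already full, stop the corresponding netdev tx queue and
 * drop the packet instead.
 */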
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->rx_queue.lock, flags);

        if (queue->rx_queue_len >= queue->rx_queue_max) {
                struct net_device *dev = queue->vif->dev;

                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        } else {
                if (skb_queue_empty(&queue->rx_queue))
                        xenvif_update_needed_slots(queue, skb);

                __skb_queue_tail(&queue->rx_queue, skb);

                queue->rx_queue_len += skb->len;
        }

        spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

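/*
 * Remove the SKB at the head of the rx queue, update the slot count
 * needed for the new head, and wake the netdev tx queue once the
 * backlog drops back below the limit.
 */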
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        spin_lock_irq(&queue->rx_queue.lock);

        skb = __skb_dequeue(&queue->rx_queue);
        if (skb) {
                xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

                queue->rx_queue_len -= skb->len;
                if (queue->rx_queue_len < queue->rx_queue_max) {
                        struct netdev_queue *txq;

                        txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
                        netif_tx_wake_queue(txq);
                }
        }

        spin_unlock_irq(&queue->rx_queue.lock);

        return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        while ((skb = xenvif_rx_dequeue(queue)) != NULL)
                kfree_skb(skb);
}

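/*
 * Drop SKBs from the head of the rx queue whose drain deadline
 * (XENVIF_RX_CB(skb)->expires) has passed, counting them as
 * rx_dropped.
 */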
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
        struct sk_buff *skb;

        for (;;) {
                skb = skb_peek(&queue->rx_queue);
                if (!skb)
                        break;
                if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
                        break;
                xenvif_rx_dequeue(queue);
                kfree_skb(skb);
                queue->vif->dev->stats.rx_dropped++;
        }
}

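/*
 * Issue the batched grant copy operations, propagate any copy error
 * into the matching ring response, push the responses to the
 * frontend, and free the SKBs completed so far.
 */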
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
        unsigned int i;
        int notify;

        gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

        for (i = 0; i < queue->rx_copy.num; i++) {
                struct gnttab_copy *op;

                op = &queue->rx_copy.op[i];

                /* If the copy failed, overwrite the status field in
                 * the corresponding response.
                 */
                if (unlikely(op->status != GNTST_okay)) {
                        struct xen_netif_rx_response *rsp;

                        rsp = RING_GET_RESPONSE(&queue->rx,
                                                queue->rx_copy.idx[i]);
                        rsp->status = op->status;
                }
        }

        queue->rx_copy.num = 0;

        /* Push responses for all completed packets. */
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);

        __skb_queue_purge(queue->rx_copy.completed);
}

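/*
 * Append one grant copy operation for a chunk of packet data destined
 * for the ring request's grant ref, flushing the batch first if it is
 * already full. Data living in a foreign page (e.g. one granted by
 * another domain) is copied by grant ref rather than by gmfn.
 */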
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
        struct gnttab_copy *op;
        struct page *page;
        struct xen_page_foreign *foreign;

        if (queue->rx_copy.num == COPY_BATCH_SIZE)
                xenvif_rx_copy_flush(queue);

        op = &queue->rx_copy.op[queue->rx_copy.num];

        page = virt_to_page(data);

        op->flags = GNTCOPY_dest_gref;

        foreign = xen_page_foreign(page);
        if (foreign) {
                op->source.domid = foreign->domid;
                op->source.u.ref = foreign->gref;
                op->flags |= GNTCOPY_source_gref;
        } else {
                op->source.u.gmfn = virt_to_gfn(data);
                op->source.domid = DOMID_SELF;
        }

        op->source.offset = xen_offset_in_page(data);
        op->dest.u.ref = req->gref;
        op->dest.domid = queue->vif->domid;
        op->dest.offset = offset;
        op->len = len;

        queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
        queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
        if (skb_is_gso(skb)) {
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                        return XEN_NETIF_GSO_TYPE_TCPV4;
                else
                        return XEN_NETIF_GSO_TYPE_TCPV6;
        }
        return XEN_NETIF_GSO_TYPE_NONE;
}

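/* Per-packet state tracked while an SKB is being written to the ring. */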
struct xenvif_pkt_state {
        struct sk_buff *skb;
        size_t remaining_len;
        struct sk_buff *frag_iter;
        int frag; /* frag == -1 => frag_iter->head */
        unsigned int frag_offset;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
        unsigned int extra_count;
        unsigned int slot;
};

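/*
 * Start a new packet: dequeue the next SKB, reset the packet state,
 * and prepare any extra info segments (GSO, XDP headroom, hash) that
 * will be placed after the first data slot.
 */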
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        struct sk_buff *skb;
        unsigned int gso_type;

        skb = xenvif_rx_dequeue(queue);

        queue->stats.tx_bytes += skb->len;
        queue->stats.tx_packets++;

        /* Reset packet state. */
        memset(pkt, 0, sizeof(struct xenvif_pkt_state));

        pkt->skb = skb;
        pkt->frag_iter = skb;
        pkt->remaining_len = skb->len;
        pkt->frag = -1;

        gso_type = xenvif_gso_type(skb);
        if ((1 << gso_type) & queue->vif->gso_mask) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

                extra->u.gso.type = gso_type;
                extra->u.gso.size = skb_shinfo(skb)->gso_size;
                extra->u.gso.pad = 0;
                extra->u.gso.features = 0;
                extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (queue->vif->xdp_headroom) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

                memset(extra, 0, sizeof(struct xen_netif_extra_info));
                extra->u.xdp.headroom = queue->vif->xdp_headroom;
                extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
                extra->flags = 0;

                pkt->extra_count++;
        }

        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;

                extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

                extra->u.hash.algorithm =
                        XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

                if (skb->l4_hash)
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
                else
                        extra->u.hash.type =
                                skb->protocol == htons(ETH_P_IP) ?
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

                *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

                extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
                extra->flags = 0;

                pkt->extra_count++;
        }
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
        /* All responses are ready to be pushed. */
        queue->rx.rsp_prod_pvt = queue->rx.req_cons;

        __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

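/*
 * Advance the frag iterator: move to the next frag of the current
 * skb, then on to the frag_list (and each subsequent skb on it) once
 * the frags are exhausted.
 */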
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

        pkt->frag++;
        pkt->frag_offset = 0;

        if (pkt->frag >= nr_frags) {
                if (frag_iter == pkt->skb)
                        pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
                else
                        pkt->frag_iter = frag_iter->next;

                pkt->frag = -1;
        }
}

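/*
 * Return the next contiguous chunk of packet data, limited both by
 * the space left in the destination ring page and by the source page
 * boundary (a grant copy cannot cross a page boundary).
 */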
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
        struct sk_buff *frag_iter = pkt->frag_iter;
        void *frag_data;
        size_t frag_len, chunk_len;

        BUG_ON(!frag_iter);

        if (pkt->frag == -1) {
                frag_data = frag_iter->data;
                frag_len = skb_headlen(frag_iter);
        } else {
                skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

                frag_data = skb_frag_address(frag);
                frag_len = skb_frag_size(frag);
        }

        frag_data += pkt->frag_offset;
        frag_len -= pkt->frag_offset;

        chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
        chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                             xen_offset_in_page(frag_data));

        pkt->frag_offset += chunk_len;

        /* Advance to next frag? */
        if (frag_len == chunk_len)
                xenvif_rx_next_frag(pkt);

        *data = frag_data;
        *len = chunk_len;
}

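/*
 * Fill one ring slot with packet data, generating as many grant
 * copies as needed, and build the matching response. The first slot
 * additionally carries the checksum and extra-info flags for the
 * packet.
 */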
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
        unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;

        do {
                size_t len;
                void *data;

                xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
                xenvif_rx_copy_add(queue, req, offset, data, len);

                offset += len;
                pkt->remaining_len -= len;

        } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

        if (pkt->remaining_len > 0)
                flags = XEN_NETRXF_more_data;
        else
                flags = 0;

        if (pkt->slot == 0) {
                struct sk_buff *skb = pkt->skb;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        flags |= XEN_NETRXF_csum_blank |
                                 XEN_NETRXF_data_validated;
                else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                        flags |= XEN_NETRXF_data_validated;

                if (pkt->extra_count != 0)
                        flags |= XEN_NETRXF_extra_info;
        }

        rsp->offset = 0;
        rsp->flags = flags;
        rsp->id = req->id;
        rsp->status = (s16)offset;
}

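/*
 * Consume one ring slot for the next pending extra info segment,
 * chaining segments with XEN_NETIF_EXTRA_FLAG_MORE while more remain.
 */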
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
        struct xen_netif_extra_info *extra = (void *)rsp;
        unsigned int i;

        pkt->extra_count--;

        for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
                if (pkt->extras[i].type) {
                        *extra = pkt->extras[i];

                        if (pkt->extra_count != 0)
                                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

                        pkt->extras[i].type = 0;
                        return;
                }
        }
        BUG();
}

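/*
 * Transmit one SKB to the frontend, consuming ring slots for data and
 * extras until the whole packet has been written, then mark the
 * responses ready and hand the SKB over for completion.
 */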
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
        struct xenvif_pkt_state pkt;

        xenvif_rx_next_skb(queue, &pkt);

        queue->last_rx_time = jiffies;

        do {
                struct xen_netif_rx_request *req;
                struct xen_netif_rx_response *rsp;

                req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
                rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

                /* Extras must go after the first data slot */
                if (pkt.slot != 0 && pkt.extra_count != 0)
                        xenvif_rx_extra_slot(queue, &pkt, req, rsp);
                else
                        xenvif_rx_data_slot(queue, &pkt, req, rsp);

                queue->rx.req_cons++;
                pkt.slot++;
        } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

        xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

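/*
 * Main rx processing loop: transmit queued SKBs while ring slots are
 * available, up to RX_BATCH_SIZE packets per invocation, then flush
 * outstanding grant copies and free the completed SKBs.
 */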
void xenvif_rx_action(struct xenvif_queue *queue)
{
        struct sk_buff_head completed_skbs;
        unsigned int work_done = 0;

        __skb_queue_head_init(&completed_skbs);
        queue->rx_copy.completed = &completed_skbs;

        while (xenvif_rx_ring_slots_available(queue) &&
               work_done < RX_BATCH_SIZE) {
                xenvif_rx_skb(queue);
                work_done++;
        }

        /* Flush any pending copies and complete all skbs. */
        xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
        RING_IDX prod, cons;

        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        return prod - cons;
}

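/*
 * A queue is stalled if the frontend has not posted the slots needed
 * for the next packet within the stall timeout; it becomes ready
 * again once enough slots are available.
 */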
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return !queue->stalled &&
                xenvif_rx_queue_slots(queue) < needed &&
                time_after(jiffies,
                           queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
        unsigned int needed = READ_ONCE(queue->rx_slots_needed);

        return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

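/*
 * Check whether the rx kthread has anything to do: ring slots for the
 * next packet, a stall state change to act on, a stop request, or a
 * disabled (rogue) frontend to tear down.
 */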
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
        return xenvif_rx_ring_slots_available(queue) ||
                (queue->vif->stall_timeout &&
                 (xenvif_rx_queue_stalled(queue) ||
                  xenvif_rx_queue_ready(queue))) ||
                (test_kthread && kthread_should_stop()) ||
                queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
        struct sk_buff *skb;
        long timeout;

        skb = skb_peek(&queue->rx_queue);
        if (!skb)
                return MAX_SCHEDULE_TIMEOUT;

        timeout = XENVIF_RX_CB(skb)->expires - jiffies;
        return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
        DEFINE_WAIT(wait);

        if (xenvif_have_rx_work(queue, true))
                return;

        for (;;) {
                long ret;

                prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
                if (xenvif_have_rx_work(queue, true))
                        break;
                if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                        &queue->eoi_pending) &
                    (NETBK_RX_EOI | NETBK_COMMON_EOI))
                        xen_irq_lateeoi(queue->rx_irq, 0);

                ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
                if (!ret)
                        break;
        }
        finish_wait(&queue->wq, &wait);
}

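/*
 * Stall accounting: the carrier is dropped when the first queue
 * stalls and only restored once every queue is ready again.
 */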
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->stalled = true;

        /* At least one queue has stalled? Disable the carrier. */
        spin_lock(&vif->lock);
        if (vif->stalled_queues++ == 0) {
                netdev_info(vif->dev, "Guest Rx stalled");
                netif_carrier_off(vif->dev);
        }
        spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
        struct xenvif *vif = queue->vif;

        queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
        queue->stalled = false;

        /* All queues are ready? Enable the carrier. */
        spin_lock(&vif->lock);
        if (--vif->stalled_queues == 0) {
                netdev_info(vif->dev, "Guest Rx ready");
                netif_carrier_on(vif->dev);
        }
        spin_unlock(&vif->lock);
}

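/* Top-level guest rx kthread, one per queue. */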
int xenvif_kthread_guest_rx(void *data)
{
        struct xenvif_queue *queue = data;
        struct xenvif *vif = queue->vif;

        if (!vif->stall_timeout)
                xenvif_queue_carrier_on(queue);

        for (;;) {
                xenvif_wait_for_rx_work(queue);

                if (kthread_should_stop())
                        break;

                /* The frontend has been found to be rogue; disable it
                 * in kthread context. Currently this is only set when
                 * netback finds that the frontend has sent a malformed
                 * packet, but we cannot disable the interface in
                 * softirq context, so we defer it here, provided this
                 * thread is associated with queue 0.
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
                        break;
                }

                if (!skb_queue_empty(&queue->rx_queue))
                        xenvif_rx_action(queue);

                /* If the guest hasn't provided any Rx slots for a
                 * while, it's probably not responsive; drop the
                 * carrier so packets are dropped earlier.
                 */
                if (vif->stall_timeout) {
                        if (xenvif_rx_queue_stalled(queue))
                                xenvif_queue_carrier_off(queue);
                        else if (xenvif_rx_queue_ready(queue))
                                xenvif_queue_carrier_on(queue);
                }

                /* Queued packets may have foreign pages from other
                 * domains. These cannot be queued indefinitely as
                 * this would starve guests of grant refs and transmit
                 * slots.
                 */
                xenvif_rx_queue_drop_expired(queue);

                cond_resched();
        }

        /* Bin any remaining skbs */
        xenvif_rx_queue_purge(queue);

        return 0;
}