// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2013-2020 B.A.T.M.A.N. contributors:
 *
 * Martin Hundebøll <martin@hundeboll.net>
 */

#include "fragmentation.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <uapi/linux/batadv_packet.h>

#include "hard-interface.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"

/**
 * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
 * @head: head of chain with entries.
 * @dropped: whether the chain is cleared because all fragments are dropped
 *
 * Free fragments in the passed hlist. Should be called with appropriate lock.
 */
static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
{
	struct batadv_frag_list_entry *entry;
	struct hlist_node *node;

	hlist_for_each_entry_safe(entry, node, head, list) {
		hlist_del(&entry->list);

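		/* kfree_skb() records the skb as dropped (it fires the
		 * kfree_skb tracepoint), while consume_skb() frees it as
		 * normally consumed; pick the variant matching why the
		 * chain is cleared.
		 */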
		if (dropped)
			kfree_skb(entry->skb);
		else
			consume_skb(entry->skb);

		kfree(entry);
	}
}

/**
 * batadv_frag_purge_orig() - free fragments associated with an orig
 * @orig_node: originator to free fragments from
 * @check_cb: optional function to tell if an entry should be purged
 */
void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
			    bool (*check_cb)(struct batadv_frag_table_entry *))
{
	struct batadv_frag_table_entry *chain;
	u8 i;

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		chain = &orig_node->fragments[i];
		spin_lock_bh(&chain->lock);

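		/* Without a callback every chain is purged; with one
		 * (typically a timeout check) only the chains it approves
		 * are cleared.
		 */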
		if (!check_cb || check_cb(chain)) {
			batadv_frag_clear_chain(&chain->fragment_list, true);
			chain->size = 0;
		}

		spin_unlock_bh(&chain->lock);
	}
}

/**
 * batadv_frag_size_limit() - maximum possible size of packet to be fragmented
 *
 * Return: the maximum size of payload that can be fragmented.
 */
static int batadv_frag_size_limit(void)
{
	int limit = BATADV_FRAG_MAX_FRAG_SIZE;

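	/* The fragment header eats into each fragment's budget; the total
	 * payload limit is what remains times the maximum fragment count.
	 * With the in-tree values (1400-byte fragments, 16 fragments,
	 * 20-byte header) this works out to (1400 - 20) * 16 = 22080 bytes.
	 */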
	limit -= sizeof(struct batadv_frag_packet);
	limit *= BATADV_FRAG_MAX_FRAGMENTS;

	return limit;
}

/**
 * batadv_frag_init_chain() - check and prepare fragment chain for new fragment
 * @chain: chain in fragments table to init
 * @seqno: sequence number of the received fragment
 *
 * Make chain ready for a fragment with sequence number "seqno". Delete existing
 * entries if they have an "old" sequence number.
 *
 * Caller must hold chain->lock.
 *
 * Return: true if chain is empty and the caller can just insert the new
 * fragment without searching for the right position.
 */
static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
				   u16 seqno)
{
	lockdep_assert_held(&chain->lock);

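	/* The chain already buffers fragments of this packet; keep it and
	 * let the caller search for the right insert position.
	 */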
	if (chain->seqno == seqno)
		return false;

	if (!hlist_empty(&chain->fragment_list))
		batadv_frag_clear_chain(&chain->fragment_list, true);

	chain->size = 0;
	chain->seqno = seqno;

	return true;
}

/**
 * batadv_frag_insert_packet() - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
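	/* All fragments of one packet share a seqno; it selects one of the
	 * BATADV_FRAG_BUFFER_COUNT per-originator chains.
	 */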
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true
	 * if the chain is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest fragment number, so
		 * the merge finds the start of the packet (sent last, in the
		 * highest-numbered fragment) at the head of the chain.
		 */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}

/**
 * batadv_frag_merge_packets() - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first skb in the chain and copy the content of the remaining
 * skbs into the expanded one. After doing so, clear the chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size) + hdr_size;

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload
	 * (overwriting the fragment header).
	 */
	skb_pull(skb_out, hdr_size);
	skb_out->ip_summed = CHECKSUM_NONE;
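	/* Pulling hdr_size bytes leaves more than ETH_HLEN bytes of room in
	 * front of the payload, so the Ethernet header fits right before the
	 * new data pointer.
	 */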
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each remaining fragment into the merged skb */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);
	return skb_out;
}

/**
 * batadv_frag_skb_buffer() - buffer fragment for later merge
 * @skb: skb to buffer
 * @orig_node_src: originator that the skb is received from
 *
 * Add fragment to buffer and merge fragments if possible.
 *
 * There are three possible outcomes: 1) Packet is merged: Return true and
 * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
 * to NULL; 3) Error: Return false and free skb.
 *
 * Return: true when the packet is merged or buffered, false when skb is not
 * used.
 */
bool batadv_frag_skb_buffer(struct sk_buff **skb,
			    struct batadv_orig_node *orig_node_src)
{
	struct sk_buff *skb_out = NULL;
	struct hlist_head head = HLIST_HEAD_INIT;
	bool ret = false;

	/* Add packet to buffer and table entry if merge is possible. */
	if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
		goto out_err;

	/* Leave if more fragments are needed to merge. */
	if (hlist_empty(&head))
		goto out;

	skb_out = batadv_frag_merge_packets(&head);
	if (!skb_out)
		goto out_err;

out:
	ret = true;
out_err:
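	/* On the buffered path skb_out is still NULL; on the error paths the
	 * original skb has already been freed by batadv_frag_insert_packet()
	 * or batadv_frag_merge_packets(), so NULL is handed back as well.
	 */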
	*skb = skb_out;
	return ret;
}

/**
 * batadv_frag_skb_fwd() - forward fragments that would exceed MTU when merged
 * @skb: skb to forward
 * @recv_if: interface that the skb is received on
 * @orig_node_src: originator that the skb is received from
 *
 * Look up the next-hop of the fragment's payload and check if the merged
 * packet would exceed the MTU towards that next-hop. If so, the fragment is
 * forwarded without merging it.
 *
 * Return: true if the fragment is consumed/forwarded, false otherwise.
 */
bool batadv_frag_skb_fwd(struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_orig_node *orig_node_src)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_dst;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_frag_packet *packet;
	u16 total_size;
	bool ret = false;

	packet = (struct batadv_frag_packet *)skb->data;
	orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
	if (!orig_node_dst)
		goto out;

	neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
	if (!neigh_node)
		goto out;

	/* Forward the fragment, if the merged packet would be too big to
	 * be assembled.
	 */
	total_size = ntohs(packet->total_size);
	if (total_size > neigh_node->if_incoming->net_dev->mtu) {
		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
				   skb->len + ETH_HLEN);

		packet->ttl--;
		batadv_send_unicast_skb(skb, neigh_node);
		ret = true;
	}

out:
	if (orig_node_dst)
		batadv_orig_node_put(orig_node_dst);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	return ret;
}

/**
 * batadv_frag_create() - create a fragment from skb
 * @net_dev: outgoing device for fragment
 * @skb: skb to create fragment from
 * @frag_head: header to use in new fragment
 * @fragment_size: size of new fragment
 *
 * Split the passed skb into two fragments: A new one with size matching the
 * passed fragment size and the old one with the rest. The new skb contains
 * data from the tail of the old skb.
 *
 * Return: the new fragment, NULL on error.
 */
static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
					  struct sk_buff *skb,
					  struct batadv_frag_packet *frag_head,
					  unsigned int fragment_size)
{
	unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
	unsigned int tailroom = net_dev->needed_tailroom;
	struct sk_buff *skb_fragment;
	unsigned int header_size = sizeof(*frag_head);
	unsigned int mtu = fragment_size + header_size;

	skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
	if (!skb_fragment)
		goto err;

	skb_fragment->priority = skb->priority;

	/* Eat the last fragment_size bytes of the skb */
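	/* The reserve below covers the device's link-layer headroom plus our
	 * fragment header; skb_push() further down reclaims the header_size
	 * part for the copied header.
	 */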
	skb_reserve(skb_fragment, ll_reserved + header_size);
	skb_split(skb, skb_fragment, skb->len - fragment_size);

	/* Add the header */
	skb_push(skb_fragment, header_size);
	memcpy(skb_fragment->data, frag_head, header_size);

err:
	return skb_fragment;
}

/**
 * batadv_frag_send_packet() - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct net_device *net_dev = neigh_node->if_incoming->net_dev;
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

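	/* Both divisions round up: first compute how many fragments are
	 * needed, then spread the payload as evenly as possible across them
	 * so the last fragment is not left nearly empty.
	 */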
	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;

	/* Don't even try to fragment if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
						  max_fragment_size);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* make sure that there is at least enough headroom for the
	 * fragmentation and ethernet headers
	 */
	ret = skb_cow_head(skb, ETH_HLEN + header_size);
	if (ret < 0)
		goto put_primary_if;

	skb_push(skb, header_size);
	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);

	return ret;
}