Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (c) 2008, 2009 open80211s Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Author:     Luis Carlos Cobo <luisca@cozybit.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/random.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <net/mac80211.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include "wme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include "ieee80211_i.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include "mesh.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 	/* Use last four bytes of hw addr as hash index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
/* rhashtable configuration shared by the mesh path and proxy path tables.
 * The key is the ETH_ALEN destination address stored inside struct
 * mesh_path; hashing is done by mesh_table_hash() above.
 */
static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) static inline bool mpath_expired(struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	return (mpath->flags & MESH_PATH_ACTIVE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	       time_after(jiffies, mpath->exp_time) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	       !(mpath->flags & MESH_PATH_FIXED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
/* rhashtable_free_and_destroy() callback: release one mesh path entry.
 * The void pointers convert implicitly to the typed parameters of
 * mesh_path_free_rcu().
 */
static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	mesh_path_free_rcu(tblptr, ptr);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) static struct mesh_table *mesh_table_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	struct mesh_table *newtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	if (!newtbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	INIT_HLIST_HEAD(&newtbl->known_gates);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	INIT_HLIST_HEAD(&newtbl->walk_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	atomic_set(&newtbl->entries,  0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 	spin_lock_init(&newtbl->gates_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	spin_lock_init(&newtbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 	if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 		kfree(newtbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	return newtbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
/* Tear down @tbl: every remaining entry is released through
 * mesh_path_rht_free(), then the rhashtable and the table itself are freed.
 */
static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Besides publishing the new next hop, this rewrites the 802.11 headers of
 * all frames already queued on @mpath->frame_queue so they are addressed
 * to the new next hop.
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	/* Walk the queue under its own lock: addr1 becomes the new next
	 * hop, addr2 our interface address; power-save frame flags are
	 * refreshed for the new peer.
	 */
	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
/* prepare_for_gate - rewrite a queued frame so it can be sent via a gate
 *
 * @skb: frame to rewrite; must start with an 802.11 header followed by a
 *	 mesh header
 * @dst_addr: final destination, written into addr3
 * @gate_mpath: path to the gate; its next_hop becomes the new addr1
 *
 * If the frame has no Address Extension field yet, the mesh header is
 * extended by two addresses (AE A5/A6) preserving the frame's original
 * endpoints in eaddr1/eaddr2.
 * NOTE(review): the AE branch assumes @skb has 2 * ETH_ALEN bytes of
 * headroom available for skb_push() — confirm against the callers.
 */
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		/* headers moved; re-derive both pointers from skb->data */
		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	/* Drain the failed path's queue onto the private failq list so the
	 * rest of the work runs without holding its lock.
	 */
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		/* stop once the gate queue is full; remaining frames stay
		 * on failq (and are restored below in copy mode) */
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		/* always work on a copy: the original is either kept
		 * (copy mode) or unlinked and freed (move mode) */
		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	/* copy mode: put the originals back on the failed path's queue */
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
/* Look up @dst in @tbl.  As a side effect, an expired entry has its
 * MESH_PATH_ACTIVE flag cleared before being returned.  @sdata is
 * currently unused.
 *
 * Locking: callers must be in an RCU read-side section (see the
 * kernel-doc of mesh_path_lookup() below).
 */
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * An expired path has its MESH_PATH_ACTIVE flag cleared as a side effect
 * (see mpath_lookup()).
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 
/**
 * mpp_path_lookup - look up a proxy path in the mesh proxy path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of the proxied destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 
/* Return the @idx'th entry on @tbl's walk list, or NULL if the list has
 * fewer entries.  Relies on hlist_for_each_entry_rcu() setting @mpath to
 * NULL when the list is exhausted.  An expired entry has its
 * MESH_PATH_ACTIVE flag cleared before being returned.
 *
 * Locking: must be called within a read rcu section (see callers).
 */
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	/* NULL when the loop ran off the end of the list */
	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	/* thin wrapper over the shared walk-list lookup */
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	/* thin wrapper over the shared walk-list lookup */
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 
/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if @mpath is already marked as a gate.
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	/* lock nesting: state_lock (BH disabled) -> gates_lock */
	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * No-op when @mpath is not currently marked as a gate.
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	/* mirrors the locking of mesh_path_add_gate(): callers hold
	 * state_lock, gates_lock guards the list and counter */
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: the current value of the num_gates counter maintained by
 * mesh_path_add_gate() / mesh_gate_del().
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 				const u8 *dst, gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	struct mesh_path *new_mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 	if (!new_mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	memcpy(new_mpath->dst, dst, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	eth_broadcast_addr(new_mpath->rann_snd_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	new_mpath->is_root = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 	new_mpath->sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	new_mpath->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	skb_queue_head_init(&new_mpath->frame_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	new_mpath->exp_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	spin_lock_init(&new_mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	timer_setup(&new_mpath->timer, mesh_path_timer, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	return new_mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)  * mesh_path_add - allocate and add a new path to the mesh path table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)  * @dst: destination address of the path (ETH_ALEN length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)  * @sdata: local subif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)  * Returns: 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399)  * State: the initial state of the new path is set to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 				const u8 *dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	struct mesh_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	struct mesh_path *mpath, *new_mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	if (ether_addr_equal(dst, sdata->vif.addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 		/* never add ourselves as neighbours */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 		return ERR_PTR(-ENOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	if (is_multicast_ether_addr(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 		return ERR_PTR(-ENOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 		return ERR_PTR(-ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	if (!new_mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	tbl = sdata->u.mesh.mesh_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 						  &new_mpath->rhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 						  mesh_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 	if (!mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	if (mpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 		kfree(new_mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 		if (IS_ERR(mpath))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 			return mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 		new_mpath = mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	sdata->u.mesh.mesh_paths_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	return new_mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) int mpp_path_add(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 		 const u8 *dst, const u8 *mpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	struct mesh_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	struct mesh_path *new_mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	if (ether_addr_equal(dst, sdata->vif.addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		/* never add ourselves as neighbours */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	if (is_multicast_ether_addr(dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	if (!new_mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	tbl = sdata->u.mesh.mpp_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 					    &new_mpath->rhash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 					    mesh_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		kfree(new_mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 	sdata->u.mesh.mpp_paths_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)  * mesh_plink_broken - deactivates paths and sends perr when a link breaks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)  * @sta: broken peer link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)  * This function must be called from the rate control algorithm if enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)  * delivery errors suggest that a peer link is no longer usable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) void mesh_plink_broken(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		if (rcu_access_pointer(mpath->next_hop) == sta &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		    mpath->flags & MESH_PATH_ACTIVE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		    !(mpath->flags & MESH_PATH_FIXED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 			spin_lock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 			mpath->flags &= ~MESH_PATH_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 			++mpath->sn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 			spin_unlock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			mesh_path_error_tx(sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 				sdata->u.mesh.mshcfg.element_ttl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 				mpath->dst, mpath->sn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) static void mesh_path_free_rcu(struct mesh_table *tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 			       struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	spin_lock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	mesh_gate_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	spin_unlock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	del_timer_sync(&mpath->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	atomic_dec(&sdata->u.mesh.mpaths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	atomic_dec(&tbl->entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	mesh_path_flush_pending(mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	kfree_rcu(mpath, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	hlist_del_rcu(&mpath->walk_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	mesh_path_free_rcu(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)  * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540)  * @sta: mesh peer to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)  * RCU notes: this function is called when a mesh plink transitions from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)  * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)  * allows path creation. This will happen before the sta can be freed (because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)  * sta_info_destroy() calls this) so any reader in a rcu read block will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)  * protected against the plink disappearing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) void mesh_path_flush_by_nexthop(struct sta_info *sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	struct ieee80211_sub_if_data *sdata = sta->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		if (rcu_access_pointer(mpath->next_hop) == sta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			__mesh_path_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 			       const u8 *proxy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		if (ether_addr_equal(mpath->mpp, proxy))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 			__mesh_path_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static void table_flush_by_iface(struct mesh_table *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		__mesh_path_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)  * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593)  * This function deletes both mesh paths as well as mesh portal paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)  * @sdata: interface data to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	table_flush_by_iface(sdata->u.mesh.mesh_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	table_flush_by_iface(sdata->u.mesh.mpp_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)  * table_path_del - delete a path from the mesh or mpp table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)  * @tbl: mesh or mpp path table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)  * @sdata: local subif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)  * @addr: dst address (ETH_ALEN length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)  * Returns: 0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) static int table_path_del(struct mesh_table *tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 			  struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 			  const u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	if (!mpath) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 		spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	__mesh_path_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)  * mesh_path_del - delete a mesh path from the table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)  * @addr: dst address (ETH_ALEN length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)  * @sdata: local subif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)  * Returns: 0 if successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	/* flush relevant mpp entries first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	mpp_flush_by_proxy(sdata, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	sdata->u.mesh.mesh_paths_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)  * mesh_path_tx_pending - sends pending frames in a mesh path queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)  * @mpath: mesh path to activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)  * Locking: the state_lock of the mpath structure must NOT be held when calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)  * this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) void mesh_path_tx_pending(struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	if (mpath->flags & MESH_PATH_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 		ieee80211_add_pending_skbs(mpath->sdata->local,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 				&mpath->frame_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)  * mesh_path_send_to_gates - sends pending frames to all known mesh gates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)  * @mpath: mesh path whose queue will be emptied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)  * If there is only one gate, the frames are transferred from the failed mpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)  * queue to that gate's queue.  If there are more than one gates, the frames
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674)  * are copied from each gate to the next.  After frames are copied, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)  * mpath queues are emptied onto the transmission queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) int mesh_path_send_to_gates(struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	struct mesh_table *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	struct mesh_path *from_mpath = mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 	struct mesh_path *gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	bool copy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	tbl = sdata->u.mesh.mesh_paths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		if (gate->flags & MESH_PATH_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 			mesh_path_move_to_queue(gate, from_mpath, copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 			from_mpath = gate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 			copy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 			mpath_dbg(sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 				  "Not forwarding to %pM (flags %#x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 				  gate->dst, gate->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		mesh_path_tx_pending(gate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)  * mesh_path_discard_frame - discard a frame whose path could not be resolved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)  * @skb: frame to discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714)  * @sdata: network subif the frame was to be sent through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)  * Locking: the function must me called within a rcu_read_lock region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 			     struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	sdata->u.mesh.mshstats.dropped_frames_no_route++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)  * mesh_path_flush_pending - free the pending queue of a mesh path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)  * @mpath: mesh path whose queue has to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)  * Locking: the function must me called within a rcu_read_lock region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) void mesh_path_flush_pending(struct mesh_path *mpath)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		mesh_path_discard_frame(mpath->sdata, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)  * mesh_path_fix_nexthop - force a specific next hop for a mesh path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)  * @mpath: the mesh path to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)  * @next_hop: the next hop to force
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)  * Locking: this function must be called holding mpath->state_lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 	spin_lock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	mesh_path_assign_nexthop(mpath, next_hop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	mpath->sn = 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	mpath->metric = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	mpath->hop_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 	mpath->exp_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	mesh_path_activate(mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	spin_unlock_bh(&mpath->state_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	/* init it at a low value - 0 start is tricky */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	mesh_path_tx_pending(mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	struct mesh_table *tbl_path, *tbl_mpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	tbl_path = mesh_table_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	if (!tbl_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	tbl_mpp = mesh_table_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	if (!tbl_mpp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		goto free_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	sdata->u.mesh.mesh_paths = tbl_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	sdata->u.mesh.mpp_paths = tbl_mpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) free_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	mesh_table_free(tbl_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 			  struct mesh_table *tbl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	struct mesh_path *mpath;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	struct hlist_node *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	spin_lock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 			__mesh_path_del(tbl, mpath);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	spin_unlock_bh(&tbl->walk_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 	mesh_table_free(sdata->u.mesh.mesh_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	mesh_table_free(sdata->u.mesh.mpp_paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }