Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

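The file reproduced below is net/x25/x25_forward.c from that tree: the X.25 call and data forwarding code. It works on struct x25_forward entries declared in include/net/x25.h; as a reading aid, a rough sketch of the fields this file touches is shown first (the exact types and any remaining fields are an assumption here and should be checked against the header):

/* Rough sketch of struct x25_forward, for orientation only.
 * Field names match the code below; exact types and any additional
 * fields are assumptions -- see include/net/x25.h for the real definition.
 */
struct x25_forward {
	struct list_head	node;	/* linkage on x25_forward_list */
	unsigned int		lci;	/* logical channel being relayed */
	struct net_device	*dev1;	/* device towards the called address */
	struct net_device	*dev2;	/* device the call arrived on */
	/* ... */
};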
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	History
 *	03-01-2007	Added forwarding for x.25	Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>

LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);

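/*
 * x25_forward_call - relay an incoming Call Request towards dest_addr.
 *
 * Looks up the route for the called address, refuses to send the call back
 * out of the interface it arrived on, records the (lci, dev1, dev2) triple
 * on x25_forward_list so later traffic on this channel can be relayed, and
 * finally transmits a clone of the Call Request to the next-hop neighbour.
 * Returns 1 if the frame was forwarded, 0 if it was not, or -ENOMEM if the
 * forwarding entry could not be allocated.
 */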
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
			struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct list_head *entry;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This shouldn't happen, if it occurs somehow
		 * do something sensible
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route
	 */
	if (rt->dev == from->dev) {
		goto out_put_nb;
	}

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, just in case..
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		x25_frwd = list_entry(entry, struct x25_forward, node);
		if (x25_frwd->lci == lci) {
			pr_warn("call request for lci which is already registered!, transmitting but not registering new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci){
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
						GFP_ATOMIC)) == NULL){
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
		goto out_put_nb;
	}
	x25_transmit_link(skbn, neigh_new);
	rc = 1;


out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}

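/*
 * x25_forward_data - relay a frame that belongs to a forwarded call.
 *
 * Finds the forwarding entry for this lci, selects the device on the other
 * side of the call from the one the frame arrived on, and transmits a copy
 * of the skb to that device's neighbour. Returns 1 if the frame was
 * forwarded, 0 otherwise.
 */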
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {

	struct x25_forward *frwd;
	struct list_head *entry;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each(entry, &x25_forward_list) {
		frwd = list_entry(entry, struct x25_forward, node);
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1) {
				peer = frwd->dev2;
			} else {
				peer = frwd->dev1;
			}
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ( (nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
		goto output;

	}
	x25_transmit_link(skbn, nb);

	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}

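/* Remove any forwarding entries registered for the given logical channel. */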
void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}

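/* Remove every forwarding entry that references dev on either side. */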
void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}