Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

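The file below is net/caif/caif_dev.c (CAIF device interface registration) as carried in this tree, at commit 8f3ce5b39.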
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
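/*
 * TX flow-off threshold, as a percentage of the device tx_queue_len:
 * transmit() below turns flow off once the qdisc backlog reaches this
 * fill level.
 */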
static int q_high = 50; /* Percent */

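/*
 * Return the CAIF configuration object of this network namespace,
 * allocated in caif_init_net() below.
 */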
struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

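/*
 * Per-CPU reference counting: caifd_hold()/caifd_put() only touch the
 * local CPU's counter, keeping the packet fast paths cheap. The true
 * count is the sum over all possible CPUs, computed by
 * caifd_refcnt_read() when a device is torn down.
 */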
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

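/*
 * Look up the caif_device_entry for a net-device. The list is
 * traversed under RCU, so callers must hold either the RCU read lock
 * or the RTNL lock (see lockdep_rtnl_is_held() in the iterator).
 */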
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

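/*
 * Substitute skb destructor installed by transmit() when flow is
 * turned off: it runs the original destructor, clears the xoff state
 * and signals FLOW_ON to the layer above.
 */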
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

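/*
 * Transmit a CAIF packet on the underlying net-device. If the device
 * has a real TX queue (no IFF_NO_QUEUE) and the queue is stopped or
 * its qdisc backlog passes the q_high threshold, flow off is signaled
 * to the CAIF stack until the queued skb's destructor (caif_flow_cb)
 * runs.
 */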
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* The qdisc length can only be checked explicitly for
		 * !TCQ_F_NOLOCK qdiscs; otherwise always set flow off.
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function, replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
					_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
					caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

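/*
 * RX hook: dev_add_pack() in caif_device_init() registers this handler
 * so that all ETH_P_CAIF frames are fed into receive() above.
 */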
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

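/*
 * Flow-control hook handed to the driver through caifdev->flowctrl in
 * caif_device_notify(); it forwards the driver's on/off indication to
 * the layer above.
 */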
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

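/*
 * Register a net-device with the CAIF stack: allocate a tracking
 * entry, add it to the per-namespace list and plug its layer into the
 * cfcnfg configuration as a new physical interface.
 */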
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
				link_support,
				caifdev->use_fcs,
				head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
							caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

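/*
 * Namespace teardown: unhook every remaining device, waiting up to
 * 10 x 250 ms per device for outstanding references and in-flight
 * packets before freeing it.
 */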
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

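/*
 * The .size field makes the pernet core allocate one struct caif_net
 * per namespace, retrievable via net_generic() with caif_net_id.
 */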
static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);