Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * drivers/net/macsec.c - MACsec device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/socket.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <crypto/aead.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/etherdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/netdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/refcount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <net/genetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <net/gro_cells.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <net/macsec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/phy.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/byteorder/generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/if_arp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <uapi/linux/if_macsec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #define MACSEC_SCI_LEN 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) /* SecTAG length = macsec_eth_header without the optional SCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #define MACSEC_TAG_LEN 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
/* Ethernet header immediately followed by the MACsec SecTAG;
 * secure_channel_id is only present on the wire when the SC bit
 * of tci_an is set.
 */
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;		/* TCI flag bits plus the 2-bit association number */
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,	/* SL: secure-data length when below MIN_NON_SHORT_LEN */
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;	/* PN, also used to build the GCM IV */
	u8 secure_channel_id[8]; /* optional */
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define MACSEC_TCI_VERSION 0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #define MACSEC_TCI_ES      0x40 /* end station */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define MACSEC_TCI_SC      0x20 /* SCI present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define MACSEC_TCI_SCB     0x10 /* epon */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define MACSEC_TCI_E       0x08 /* encryption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define MACSEC_TCI_C       0x04 /* changed text */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define MACSEC_AN_MASK     0x03 /* association number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) #define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) /* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #define MIN_NON_SHORT_LEN 48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define GCM_AES_IV_LEN 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) #define DEFAULT_ICV_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) #define for_each_rxsc(secy, sc)				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	for (sc = rcu_dereference_bh(secy->rx_sc);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	     sc;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	     sc = rcu_dereference_bh(sc->next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) #define for_each_rxsc_rtnl(secy, sc)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	for (sc = rtnl_dereference(secy->rx_sc);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	     sc;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	     sc = rtnl_dereference(sc->next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
/* 96-bit GCM IV layout for XPN cipher suites: SSCI followed by a 64-bit PN,
 * each XORed with the key's salt — see macsec_fill_iv_xpn().
 */
struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
/* 96-bit GCM IV layout for non-XPN cipher suites: 64-bit SCI followed by
 * the 32-bit PN — see macsec_fill_iv().
 */
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 
/* Per-CPU SecY (device-level) statistics; @syncp lets readers fetch
 * consistent 64-bit counters on 32-bit hosts.
 */
struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: pointer to the Generic Receive Offload cell
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 
/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	/* read via rcu_dereference_bh()/rtnl_dereference() on
	 * dev->rx_handler_data, see macsec_data_rcu()/macsec_data_rtnl()
	 */
	struct list_head secys;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
/* Netdev private area of a MACsec device. */
static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	struct macsec_dev *priv = netdev_priv(dev);

	return priv;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) 	return rcu_dereference_bh(dev->rx_handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	return rtnl_dereference(dev->rx_handler_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 
/* Per-skb crypto state, stored in skb->cb (size checked against skb->cb
 * in macsec_skb_cb()).
 */
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;	/* TX path */
		struct macsec_rx_sa *rx_sa;	/* RX path */
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	if (!sa || !sa->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	if (!refcount_inc_not_zero(&sa->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	return sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) static void free_rx_sc_rcu(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	free_percpu(rx_sc->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	kfree(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) static void macsec_rxsc_put(struct macsec_rx_sc *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	if (refcount_dec_and_test(&sc->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) static void free_rxsa(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	crypto_free_aead(sa->key.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	free_percpu(sa->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	kfree(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) static void macsec_rxsa_put(struct macsec_rx_sa *sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	if (refcount_dec_and_test(&sa->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 		call_rcu(&sa->rcu, free_rxsa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	if (!sa || !sa->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	if (!refcount_inc_not_zero(&sa->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	return sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) static void free_txsa(struct rcu_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	crypto_free_aead(sa->key.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 	free_percpu(sa->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	kfree(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) static void macsec_txsa_put(struct macsec_tx_sa *sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	if (refcount_dec_and_test(&sa->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 		call_rcu(&sa->rcu, free_txsa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 
/* Access the MACsec control block stored in skb->cb; the BUILD_BUG_ON
 * guarantees at compile time that struct macsec_cb fits in the cb area.
 */
static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) #define MACSEC_PORT_ES (htons(0x0001))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) #define MACSEC_PORT_SCB (0x0000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) #define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) #define MACSEC_GCM_AES_128_SAK_LEN 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) #define MACSEC_GCM_AES_256_SAK_LEN 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) #define DEFAULT_XPN false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) #define DEFAULT_SEND_SCI true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) #define DEFAULT_ENCRYPT false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) #define DEFAULT_ENCODING_SA 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) static bool send_sci(const struct macsec_secy *secy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 	return tx_sc->send_sci ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) static sci_t make_sci(u8 *addr, __be16 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 	memcpy(&sci, addr, ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	return sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	if (sci_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		memcpy(&sci, hdr->secure_channel_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 		       sizeof(hdr->secure_channel_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	return sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) static unsigned int macsec_sectag_len(bool sci_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) static unsigned int macsec_hdr_len(bool sci_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	return macsec_sectag_len(sci_present) + ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) static unsigned int macsec_extra_len(bool sci_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	return macsec_sectag_len(sci_present) + sizeof(__be16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) /* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	/* zero the whole SecTAG, with or without the optional SCI */
	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		/* ES/SCB are only set when no explicit SCI is carried */
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	/* the low bits of tci_an carry the encoding SA's association number */
	h->tci_an |= tx_sc->encoding_sa;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	if (data_len < MIN_NON_SHORT_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		h->short_length = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) /* Checks if a MACsec interface is being offloaded to an hardware engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) static bool macsec_is_offloaded(struct macsec_dev *macsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	    macsec->offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) /* Checks if underlying layers implement MACsec offloading functions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) static bool macsec_check_offload(enum macsec_offload offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 				 struct macsec_dev *macsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	if (!macsec || !macsec->real_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	if (offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 		return macsec->real_dev->phydev &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 		       macsec->real_dev->phydev->macsec_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	else if (offload == MACSEC_OFFLOAD_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		       macsec->real_dev->macsec_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 						 struct macsec_dev *macsec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 						 struct macsec_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) 	if (ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 		memset(ctx, 0, sizeof(*ctx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		ctx->offload = offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		if (offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 			ctx->phydev = macsec->real_dev->phydev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		else if (offload == MACSEC_OFFLOAD_MAC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 			ctx->netdev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	if (offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 		return macsec->real_dev->phydev->macsec_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 		return macsec->real_dev->macsec_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) /* Returns a pointer to the MACsec ops struct if any and updates the MACsec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376)  * context device reference if provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 					       struct macsec_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	if (!macsec_check_offload(macsec->offload, macsec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	return __macsec_get_ops(macsec->offload, macsec, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) /* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	/* length counted from the ethertype onwards (skb->data starts at
	 * the destination MAC here)
	 */
	int len = skb->len - 2 * ETH_ALEN;
	/* SecTAG (+SCI when the SC bit is set) + ethertype + ICV */
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
/* Build the 96-bit GCM IV for XPN cipher suites: both the SSCI and the
 * 64-bit PN are XORed with the key's salt.
 */
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	gcm_iv->sci = sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	gcm_iv->pn = htonl(pn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 
/* MACsec header (ethernet + SecTAG) at the skb's MAC header offset. */
static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	unsigned char *mac = skb_mac_header(skb);

	return (struct macsec_eth_header *)mac;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) static sci_t dev_to_sci(struct net_device *dev, __be16 port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	return make_sci(dev->dev_addr, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) static void __macsec_pn_wrapped(struct macsec_secy *secy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 				struct macsec_tx_sa *tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 	pr_debug("PN wrapped, transitioning to !oper\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	tx_sa->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	if (secy->protect_frames)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		secy->operational = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	spin_lock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	__macsec_pn_wrapped(secy, tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	spin_unlock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 			    struct macsec_secy *secy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	pn_t pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	spin_lock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	pn = tx_sa->next_pn_halves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	if (secy->xpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 		tx_sa->next_pn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		tx_sa->next_pn_halves.lower++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	if (tx_sa->next_pn == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		__macsec_pn_wrapped(secy, tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	spin_unlock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	return pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	struct macsec_dev *macsec = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	skb->dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	skb_reset_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	skb->protocol = eth_hdr(skb)->h_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 			    struct macsec_tx_sa *tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	u64_stats_update_begin(&txsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	if (tx_sc->encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		txsc_stats->stats.OutOctetsEncrypted += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		txsc_stats->stats.OutPktsEncrypted++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		txsc_stats->stats.OutOctetsProtected += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		txsc_stats->stats.OutPktsProtected++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		this_cpu_inc(tx_sa->stats->OutPktsProtected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	u64_stats_update_end(&txsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) static void count_tx(struct net_device *dev, int ret, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		u64_stats_update_begin(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		stats->tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		stats->tx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 		u64_stats_update_end(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) static void macsec_encrypt_done(struct crypto_async_request *base, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	struct sk_buff *skb = base->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	struct net_device *dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	int len, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	aead_request_free(macsec_skb_cb(skb)->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	rcu_read_lock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	macsec_encrypt_finish(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	ret = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	count_tx(dev, ret, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	rcu_read_unlock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	macsec_txsa_put(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 					     unsigned char **iv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 					     struct scatterlist **sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 					     int num_frags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	size_t size, iv_offset, sg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	void *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	iv_offset = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	size += GCM_AES_IV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	size = ALIGN(size, __alignof__(struct scatterlist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	sg_offset = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	size += sizeof(struct scatterlist) * num_frags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	tmp = kmalloc(size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	*iv = (unsigned char *)(tmp + iv_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	*sg = (struct scatterlist *)(tmp + sg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	req = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	aead_request_set_tfm(req, tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 				      struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	struct sk_buff *trailer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	unsigned char *iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	struct ethhdr *eth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	struct macsec_eth_header *hh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	size_t unprotected_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	bool sci_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	pn_t pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	/* 10.5.1 TX SA assignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	if (!tx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 		secy->operational = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 		struct sk_buff *nskb = skb_copy_expand(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 						       MACSEC_NEEDED_HEADROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 						       MACSEC_NEEDED_TAILROOM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 						       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		if (likely(nskb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			consume_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			skb = nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 			kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		skb = skb_unshare(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	unprotected_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	eth = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	sci_present = send_sci(secy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	hh = skb_push(skb, macsec_extra_len(sci_present));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	memmove(hh, eth, 2 * ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	pn = tx_sa_update_pn(tx_sa, secy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (pn.full64 == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		return ERR_PTR(-ENOLINK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	skb_put(skb, secy->icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 		u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		secy_stats->stats.OutPktsTooLong++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	ret = skb_cow_data(skb, 0, &trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	if (secy->xpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		macsec_fill_iv(iv, secy->sci, pn.lower);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	sg_init_table(sg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	if (tx_sc->encrypt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		int len = skb->len - macsec_hdr_len(sci_present) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 			  secy->icv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 		aead_request_set_crypt(req, sg, sg, len, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		aead_request_set_ad(req, macsec_hdr_len(sci_present));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		aead_request_set_crypt(req, sg, sg, 0, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		aead_request_set_ad(req, skb->len - secy->icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	macsec_skb_cb(skb)->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	macsec_skb_cb(skb)->tx_sa = tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	dev_hold(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	ret = crypto_aead_encrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (ret == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	} else if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		dev_put(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	dev_put(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	u32 lowest_pn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	spin_lock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	/* Now perform replay protection check again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 * (see IEEE 802.1AE-2006 figure 10-5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (secy->replay_protect && pn < lowest_pn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		spin_unlock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		rxsc_stats->stats.InPktsLate++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		if (hdr->tci_an & MACSEC_TCI_E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			rxsc_stats->stats.InOctetsDecrypted += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			rxsc_stats->stats.InOctetsValidated += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (!macsec_skb_cb(skb)->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		spin_unlock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		/* 10.6.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		if (hdr->tci_an & MACSEC_TCI_C ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 			u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 			rxsc_stats->stats.InPktsNotValid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 			u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			rxsc_stats->stats.InPktsInvalid++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			this_cpu_inc(rx_sa->stats->InPktsInvalid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		} else if (pn < lowest_pn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			rxsc_stats->stats.InPktsDelayed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			rxsc_stats->stats.InPktsUnchecked++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		if (pn < lowest_pn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			rxsc_stats->stats.InPktsDelayed++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			rxsc_stats->stats.InPktsOK++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			this_cpu_inc(rx_sa->stats->InPktsOK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		// Instead of "pn >=" - to support pn overflow in xpn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		if (pn + 1 > rx_sa->next_pn_halves.lower) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			rx_sa->next_pn_halves.lower = pn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		} else if (secy->xpn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			rx_sa->next_pn_halves.upper++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			rx_sa->next_pn_halves.lower = pn + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		spin_unlock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	skb->pkt_type = PACKET_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	skb->protocol = eth_type_trans(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	if (!skb_transport_header_was_set(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	skb_reset_mac_len(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	skb->ip_summed = CHECKSUM_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	skb_pull(skb, hdr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	pskb_trim_unique(skb, skb->len - icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) static void count_rx(struct net_device *dev, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	u64_stats_update_begin(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	stats->rx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	stats->rx_bytes += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	u64_stats_update_end(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) static void macsec_decrypt_done(struct crypto_async_request *base, int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	struct sk_buff *skb = base->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct net_device *dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct macsec_rx_sc *rx_sc = rx_sa->sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	u32 pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	aead_request_free(macsec_skb_cb(skb)->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		macsec_skb_cb(skb)->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	rcu_read_lock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	pn = ntohl(macsec_ethhdr(skb)->packet_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		rcu_read_unlock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	macsec_finalize_skb(skb, macsec->secy.icv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	macsec_reset_skb(skb, macsec->secy.netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		count_rx(dev, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	rcu_read_unlock_bh();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	macsec_rxsa_put(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	macsec_rxsc_put(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				      struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 				      struct macsec_rx_sa *rx_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 				      sci_t sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 				      struct macsec_secy *secy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct sk_buff *trailer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned char *iv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	struct aead_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct macsec_eth_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	u32 hdr_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	u16 icv_len = secy->icv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	macsec_skb_cb(skb)->valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	skb = skb_share_check(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	ret = skb_cow_data(skb, 0, &trailer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	hdr = (struct macsec_eth_header *)skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	hdr_pn = ntohl(hdr->packet_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		pn_t recovered_pn = rx_sa->next_pn_halves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		recovered_pn.lower = hdr_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		if (hdr_pn < rx_sa->next_pn_halves.lower &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			recovered_pn.upper++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 				   rx_sa->key.salt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		macsec_fill_iv(iv, sci, hdr_pn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	sg_init_table(sg, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (unlikely(ret < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (hdr->tci_an & MACSEC_TCI_E) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		/* confidentiality: ethernet + macsec header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * authenticated, encrypted payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		aead_request_set_crypt(req, sg, sg, len, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		skb = skb_unshare(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		/* integrity only: all headers + data authenticated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		aead_request_set_crypt(req, sg, sg, icv_len, iv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		aead_request_set_ad(req, skb->len - icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	macsec_skb_cb(skb)->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	dev_hold(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	ret = crypto_aead_decrypt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (ret == -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	} else if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		/* decryption/authentication failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		 * 10.6 if validateFrames is disabled, deliver anyway
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		if (ret != -EBADMSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 			skb = ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		macsec_skb_cb(skb)->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	aead_request_free(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	for_each_rxsc(secy, rx_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		if (rx_sc->sci == sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			return rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	for_each_rxsc_rtnl(secy, rx_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		if (rx_sc->sci == sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			return rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	/* Deliver to the uncontrolled port by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	enum rx_handler_result ret = RX_HANDLER_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct ethhdr *hdr = eth_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	struct macsec_rxh_data *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct macsec_dev *macsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	rxd = macsec_data_rcu(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		struct sk_buff *nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		struct net_device *ndev = macsec->secy.netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		/* If h/w offloading is enabled, HW decodes frames and strips
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		 * the SecTAG, so we have to deduce which port to deliver to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			if (ether_addr_equal_64bits(hdr->h_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 						    ndev->dev_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 				/* exact match, divert skb to this port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				skb->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 				skb->pkt_type = PACKET_HOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				ret = RX_HANDLER_ANOTHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			} else if (is_multicast_ether_addr_64bits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 					   hdr->h_dest)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 				/* multicast frame, deliver on this port too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				nskb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				if (!nskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				nskb->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 				if (ether_addr_equal_64bits(hdr->h_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 							    ndev->broadcast))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 					nskb->pkt_type = PACKET_BROADCAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 					nskb->pkt_type = PACKET_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 				netif_rx(nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		/* 10.6 If the management control validateFrames is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		 * Strict, frames without a SecTAG are received, counted, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		 * delivered to the Controlled Port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			secy_stats->stats.InPktsNoTag++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		/* deliver on this port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		nskb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		if (!nskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		nskb->dev = ndev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		if (netif_rx(nskb) == NET_RX_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			secy_stats->stats.InPktsUntagged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	struct sk_buff *skb = *pskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	struct net_device *dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	struct macsec_eth_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	struct macsec_secy *secy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct macsec_rx_sa *rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	struct macsec_rxh_data *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct macsec_dev *macsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	u32 hdr_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	bool cbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct pcpu_rx_sc_stats *rxsc_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct pcpu_secy_stats *secy_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	bool pulled_sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	if (skb_headroom(skb) < ETH_HLEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		goto drop_direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	hdr = macsec_ethhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		return handle_not_macsec(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	skb = skb_unshare(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	*pskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		return RX_HANDLER_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	if (!pulled_sci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (!pskb_may_pull(skb, macsec_extra_len(false)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			goto drop_direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	hdr = macsec_ethhdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* Frames with a SecTAG that has the TCI E bit set but the C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 * bit clear are discarded, as this reserved encoding is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 * to identify frames with a SecTAG that are not to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 * delivered to the Controlled Port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return RX_HANDLER_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	/* now, pull the extra length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (hdr->tci_an & MACSEC_TCI_SC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (!pulled_sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			goto drop_direct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	/* ethernet header is part of crypto processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	skb_push(skb, ETH_HLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	rxd = macsec_data_rcu(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		sc = sc ? macsec_rxsc_get(sc) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 			rx_sc = sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	if (!secy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		goto nosci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	dev = secy->netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	secy_stats = this_cpu_ptr(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	rxsc_stats = this_cpu_ptr(rx_sc->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		secy_stats->stats.InPktsBadTag++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		goto drop_nosa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	if (!rx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		/* 10.6.1 if the SA is not in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		/* If validateFrames is Strict or the C bit in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		 * SecTAG is set, discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		if (hdr->tci_an & MACSEC_TCI_C ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			rxsc_stats->stats.InPktsNotUsingSA++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			goto drop_nosa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		/* not Strict, the frame (with the SecTAG and ICV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		 * removed) is delivered to the Controlled Port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		rxsc_stats->stats.InPktsUnusedSA++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		goto deliver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	/* First, PN check to avoid decrypting obviously wrong packets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	hdr_pn = ntohl(hdr->packet_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	if (secy->replay_protect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		bool late;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		spin_lock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		if (secy->xpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		spin_unlock(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		if (late) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			u64_stats_update_begin(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			rxsc_stats->stats.InPktsLate++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			u64_stats_update_end(&rxsc_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 			goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	macsec_skb_cb(skb)->rx_sa = rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	/* Disabled && !changed text => skip validation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (hdr->tci_an & MACSEC_TCI_C ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		/* the decrypt callback needs the reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		if (PTR_ERR(skb) != -EINPROGRESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			macsec_rxsa_put(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			macsec_rxsc_put(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		*pskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		return RX_HANDLER_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	if (!macsec_post_decrypt(skb, secy, hdr_pn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		goto drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) deliver:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	macsec_finalize_skb(skb, secy->icv_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	macsec_reset_skb(skb, secy->netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	if (rx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		macsec_rxsa_put(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	macsec_rxsc_put(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	skb_orphan(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ret = gro_cells_receive(&macsec->gro_cells, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (ret == NET_RX_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		count_rx(dev, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		macsec->secy.netdev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	*pskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	return RX_HANDLER_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	macsec_rxsa_put(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) drop_nosa:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	macsec_rxsc_put(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) drop_direct:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	*pskb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	return RX_HANDLER_CONSUMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) nosci:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/* 10.6.1 if the SC is not found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (!cbit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		struct sk_buff *nskb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		secy_stats = this_cpu_ptr(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		/* If validateFrames is Strict or the C bit in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		 * SecTAG is set, discard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (cbit ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			secy_stats->stats.InPktsNoSCI++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		/* not strict, the frame (with the SecTAG and ICV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		 * removed) is delivered to the Controlled Port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		nskb = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		if (!nskb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		macsec_reset_skb(nskb, macsec->secy.netdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		ret = netif_rx(nskb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		if (ret == NET_RX_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			secy_stats->stats.InPktsUnknownSCI++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			macsec->secy.netdev->stats.rx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	*pskb = skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	return RX_HANDLER_PASS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	struct crypto_aead *tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (IS_ERR(tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		return tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	ret = crypto_aead_setkey(tfm, key, key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	ret = crypto_aead_setauthsize(tfm, icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	return tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	crypto_free_aead(tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		      int icv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (!rx_sa->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (IS_ERR(rx_sa->key.tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		free_percpu(rx_sa->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return PTR_ERR(rx_sa->key.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	rx_sa->ssci = MACSEC_UNDEF_SSCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	rx_sa->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	rx_sa->next_pn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	refcount_set(&rx_sa->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	spin_lock_init(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	rx_sa->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	macsec_rxsa_put(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static void free_rx_sc(struct macsec_rx_sc *rx_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	for (i = 0; i < MACSEC_NUM_AN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		if (sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			clear_rx_sa(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	macsec_rxsc_put(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	     rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		if (rx_sc->sci == sci) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			if (rx_sc->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 				secy->n_rx_sc--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			rcu_assign_pointer(*rx_scp, rx_sc->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			return rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	struct macsec_dev *macsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	list_for_each_entry(macsec, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		if (find_rx_sc_rtnl(&macsec->secy, sci))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			return ERR_PTR(-EEXIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (!rx_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	if (!rx_sc->stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		kfree(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	rx_sc->sci = sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	rx_sc->active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	refcount_set(&rx_sc->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	rcu_assign_pointer(secy->rx_sc, rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (rx_sc->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		secy->n_rx_sc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	return rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		      int icv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (!tx_sa->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (IS_ERR(tx_sa->key.tfm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		free_percpu(tx_sa->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		return PTR_ERR(tx_sa->key.tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	tx_sa->ssci = MACSEC_UNDEF_SSCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	tx_sa->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	refcount_set(&tx_sa->refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	spin_lock_init(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	tx_sa->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	macsec_txsa_put(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) static struct genl_family macsec_fam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) static struct net_device *get_dev_from_nl(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 					  struct nlattr **attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	dev = __dev_get_by_index(net, ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (!netif_is_macsec(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	return dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static enum macsec_offload nla_get_offload(const struct nlattr *nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	return (__force enum macsec_offload)nla_get_u8(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) static sci_t nla_get_sci(const struct nlattr *nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	return (__force sci_t)nla_get_u64(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		       int padattr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) static ssci_t nla_get_ssci(const struct nlattr *nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	return (__force ssci_t)nla_get_u32(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	return nla_put_u32(skb, attrtype, (__force u64)value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 					     struct nlattr **attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 					     struct nlattr **tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 					     struct net_device **devp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 					     struct macsec_secy **secyp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 					     struct macsec_tx_sc **scp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 					     u8 *assoc_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (!tb_sa[MACSEC_SA_ATTR_AN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	dev = get_dev_from_nl(net, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	if (IS_ERR(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		return ERR_CAST(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (*assoc_num >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (!tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	*devp = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	*scp = tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	*secyp = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	return tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 					     struct nlattr **attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 					     struct nlattr **tb_rxsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 					     struct net_device **devp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 					     struct macsec_secy **secyp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	dev = get_dev_from_nl(net, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	if (IS_ERR(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		return ERR_CAST(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	rx_sc = find_rx_sc_rtnl(secy, sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	if (!rx_sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	*secyp = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	*devp = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	return rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 					     struct nlattr **attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 					     struct nlattr **tb_rxsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 					     struct nlattr **tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 					     struct net_device **devp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 					     struct macsec_secy **secyp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 					     struct macsec_rx_sc **scp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 					     u8 *assoc_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct macsec_rx_sa *rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (!tb_sa[MACSEC_SA_ATTR_AN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (*assoc_num >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (IS_ERR(rx_sc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		return ERR_CAST(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	if (!rx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	*scp = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	return rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 				   .len = MACSEC_KEYID_LEN, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 				 .len = MACSEC_MAX_KEY_LEN, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 				  .len = MACSEC_SALT_LEN, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /* Offloads an operation to a device driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) static int macsec_offload(int (* const func)(struct macsec_context *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			  struct macsec_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	if (unlikely(!func))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	if (ctx->offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		mutex_lock(&ctx->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	/* Phase I: prepare. The drive should fail here if there are going to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	 * issues in the commit phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	ctx->prepare = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	ret = (*func)(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		goto phy_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	/* Phase II: commit. This step cannot fail. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	ctx->prepare = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	ret = (*func)(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	/* This should never happen: commit is not allowed to fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) phy_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	if (ctx->offload == MACSEC_OFFLOAD_PHY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		mutex_unlock(&ctx->phydev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	if (!attrs[MACSEC_ATTR_SA_CONFIG])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static bool validate_add_rxsa(struct nlattr **attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	if (!attrs[MACSEC_SA_ATTR_AN] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	    !attrs[MACSEC_SA_ATTR_KEY] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	    !attrs[MACSEC_SA_ATTR_KEYID])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (attrs[MACSEC_SA_ATTR_PN] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	struct macsec_rx_sa *rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	unsigned char assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	if (!validate_add_rxsa(tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (IS_ERR(rx_sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		return PTR_ERR(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 				  MACSEC_SA_ATTR_SALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (rx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	if (!rx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			 secy->key_len, secy->icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		kfree(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		spin_lock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		spin_unlock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	rx_sa->sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 			err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		ctx.sa.rx_sa = rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		       secy->key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			   MACSEC_SALT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	kfree(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) static bool validate_add_rxsc(struct nlattr **attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	if (!attrs[MACSEC_RXSC_ATTR_SCI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	sci_t sci = MACSEC_UNDEF_SCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	bool was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (!validate_add_rxsc(tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	dev = get_dev_from_nl(genl_info_net(info), attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	if (IS_ERR(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	rx_sc = create_rx_sc(dev, sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	if (IS_ERR(rx_sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		return PTR_ERR(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	was_active = rx_sc->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		ctx.rx_sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	rx_sc->active = was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) static bool validate_add_txsa(struct nlattr **attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	if (!attrs[MACSEC_SA_ATTR_AN] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	    !attrs[MACSEC_SA_ATTR_PN] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	    !attrs[MACSEC_SA_ATTR_KEY] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	    !attrs[MACSEC_SA_ATTR_KEYID])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	unsigned char assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	bool was_operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	if (!validate_add_txsa(tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	dev = get_dev_from_nl(genl_info_net(info), attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (IS_ERR(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 				  MACSEC_SA_ATTR_SALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	if (tx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	if (!tx_sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 			 secy->key_len, secy->icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		kfree(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	spin_lock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	spin_unlock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	was_operational = secy->operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		secy->operational = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		ctx.sa.tx_sa = tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		       secy->key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			   MACSEC_SALT_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	secy->operational = was_operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	kfree(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	struct macsec_rx_sa *rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	u8 assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 				 &dev, &secy, &rx_sc, &assoc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	if (IS_ERR(rx_sa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		return PTR_ERR(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	if (rx_sa->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		ctx.sa.rx_sa = rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 	clear_rx_sa(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	if (IS_ERR(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	rx_sc = del_rx_sc(secy, sci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	if (!rx_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		ctx.rx_sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	free_rx_sc(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	u8 assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 				 &dev, &secy, &tx_sc, &assoc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	if (IS_ERR(tx_sa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		return PTR_ERR(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	if (tx_sa->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 		ctx.sa.tx_sa = tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	clear_tx_sa(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static bool validate_upd_sa(struct nlattr **attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	if (!attrs[MACSEC_SA_ATTR_AN] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	    attrs[MACSEC_SA_ATTR_KEY] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	    attrs[MACSEC_SA_ATTR_KEYID] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	    attrs[MACSEC_SA_ATTR_SSCI] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	    attrs[MACSEC_SA_ATTR_SALT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	u8 assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	bool was_operational, was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	pn_t prev_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	prev_pn.full64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	if (!validate_upd_sa(tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 				 &dev, &secy, &tx_sc, &assoc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	if (IS_ERR(tx_sa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 		return PTR_ERR(tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		spin_lock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 		prev_pn = tx_sa->next_pn_halves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		spin_unlock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	was_active = tx_sa->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	was_operational = secy->operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	if (assoc_num == tx_sc->encoding_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		secy->operational = tx_sa->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		ctx.sa.tx_sa = tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 		spin_lock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		tx_sa->next_pn_halves = prev_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		spin_unlock_bh(&tx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	tx_sa->active = was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 	secy->operational = was_operational;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	struct macsec_rx_sa *rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	u8 assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	bool was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	pn_t prev_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	prev_pn.full64 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	if (parse_sa_config(attrs, tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	if (!validate_upd_sa(tb_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 				 &dev, &secy, &rx_sc, &assoc_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	if (IS_ERR(rx_sa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		return PTR_ERR(rx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 			rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		spin_lock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		prev_pn = rx_sa->next_pn_halves;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		spin_unlock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	was_active = rx_sa->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		ctx.sa.assoc_num = assoc_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		ctx.sa.rx_sa = rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		spin_lock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 		rx_sa->next_pn_halves = prev_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		spin_unlock_bh(&rx_sa->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 	rx_sa->active = was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	unsigned int prev_n_rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 	bool was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 	if (parse_rxsc_config(attrs, tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 	if (!validate_add_rxsc(tb_rxsc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	if (IS_ERR(rx_sc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		return PTR_ERR(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	was_active = rx_sc->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	prev_n_rx_sc = secy->n_rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		if (rx_sc->active != new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 			secy->n_rx_sc += new ? 1 : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		rx_sc->active = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		ctx.rx_sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		ctx.secy = secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	secy->n_rx_sc = prev_n_rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	rx_sc->active = was_active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) static bool macsec_is_configured(struct macsec_dev *macsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	struct macsec_secy *secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	if (secy->n_rx_sc > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	for (i = 0; i < MACSEC_NUM_AN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 		if (tx_sc->sa[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	enum macsec_offload offload, prev_offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	int (*func)(struct macsec_context *ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	struct nlattr **attrs = info->attrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	struct macsec_dev *macsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	if (!attrs[MACSEC_ATTR_IFINDEX])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	if (!attrs[MACSEC_ATTR_OFFLOAD])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 					attrs[MACSEC_ATTR_OFFLOAD],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 					macsec_genl_offload_policy, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	dev = get_dev_from_nl(genl_info_net(info), attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	if (IS_ERR(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	if (macsec->offload == offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 	/* Check if the offloading mode is supported by the underlying layers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	if (offload != MACSEC_OFFLOAD_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	    !macsec_check_offload(offload, macsec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	/* Check if the net device is busy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	if (netif_running(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	prev_offload = macsec->offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	macsec->offload = offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	/* Check if the device already has rules configured: we do not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	 * rules migration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	if (macsec_is_configured(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 		goto rollback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 			       macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 		ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 		goto rollback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (prev_offload == MACSEC_OFFLOAD_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		func = ops->mdo_add_secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 		func = ops->mdo_del_secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	ret = macsec_offload(func, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 		goto rollback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	/* Force features update, since they are different for SW MACSec and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	 * HW offloading cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	netdev_update_features(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) rollback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	macsec->offload = prev_offload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) static void get_tx_sa_stats(struct net_device *dev, int an,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 			    struct macsec_tx_sa *tx_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 			    struct macsec_tx_sa_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 			ctx.sa.assoc_num = an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 			ctx.sa.tx_sa = tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 			ctx.stats.tx_sa_stats = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 			ctx.secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		const struct macsec_tx_sa_stats *stats =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 			per_cpu_ptr(tx_sa->stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		sum->OutPktsProtected += stats->OutPktsProtected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 			sum->OutPktsProtected) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 			sum->OutPktsEncrypted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) static void get_rx_sa_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 			    struct macsec_rx_sc *rx_sc, int an,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			    struct macsec_rx_sa *rx_sa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 			    struct macsec_rx_sa_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			ctx.sa.assoc_num = an;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			ctx.sa.rx_sa = rx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			ctx.stats.rx_sa_stats = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 			ctx.secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			ctx.rx_sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 		const struct macsec_rx_sa_stats *stats =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 			per_cpu_ptr(rx_sa->stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		sum->InPktsOK         += stats->InPktsOK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 		sum->InPktsInvalid    += stats->InPktsInvalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 		sum->InPktsNotValid   += stats->InPktsNotValid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static int copy_rx_sa_stats(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 			    struct macsec_rx_sa_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			sum->InPktsInvalid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 			sum->InPktsNotValid) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 			sum->InPktsNotUsingSA) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 			sum->InPktsUnusedSA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) static void get_rx_sc_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			    struct macsec_rx_sc *rx_sc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 			    struct macsec_rx_sc_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 			ctx.stats.rx_sc_stats = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 			ctx.secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 			ctx.rx_sc = rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		const struct pcpu_rx_sc_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		struct macsec_rx_sc_stats tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		stats = per_cpu_ptr(rx_sc->stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 			start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			memcpy(&tmp, &stats->stats, sizeof(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		sum->InOctetsValidated += tmp.InOctetsValidated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		sum->InPktsDelayed     += tmp.InPktsDelayed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 		sum->InPktsOK          += tmp.InPktsOK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 		sum->InPktsInvalid     += tmp.InPktsInvalid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		sum->InPktsLate        += tmp.InPktsLate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		sum->InPktsNotValid    += tmp.InPktsNotValid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 			      sum->InOctetsValidated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 			      sum->InOctetsDecrypted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 			      sum->InPktsUnchecked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 			      sum->InPktsDelayed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 			      sum->InPktsOK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 			      sum->InPktsInvalid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 			      sum->InPktsLate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 			      sum->InPktsNotValid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 			      sum->InPktsNotUsingSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 			      sum->InPktsUnusedSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 			      MACSEC_RXSC_STATS_ATTR_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) static void get_tx_sc_stats(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 			    struct macsec_tx_sc_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 			ctx.stats.tx_sc_stats = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 			ctx.secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		const struct pcpu_tx_sc_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 		struct macsec_tx_sc_stats tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 			start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			memcpy(&tmp, &stats->stats, sizeof(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 		sum->OutPktsProtected   += tmp.OutPktsProtected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 		sum->OutOctetsProtected += tmp.OutOctetsProtected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 			      sum->OutPktsProtected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 			      sum->OutPktsEncrypted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 			      sum->OutOctetsProtected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 			      sum->OutOctetsEncrypted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 			      MACSEC_TXSC_STATS_ATTR_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			ctx.stats.dev_stats = sum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 			ctx.secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 			macsec_offload(ops->mdo_get_dev_stats, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 		const struct pcpu_secy_stats *stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 		struct macsec_dev_stats tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 		unsigned int start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 			start = u64_stats_fetch_begin_irq(&stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 			memcpy(&tmp, &stats->stats, sizeof(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 		sum->InPktsUntagged   += tmp.InPktsUntagged;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 		sum->InPktsNoTag      += tmp.InPktsNoTag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 		sum->InPktsBadTag     += tmp.InPktsBadTag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 		sum->InPktsOverrun    += tmp.InPktsOverrun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			      sum->OutPktsUntagged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 			      sum->InPktsUntagged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 			      sum->OutPktsTooLong,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 			      sum->InPktsNoTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 			      sum->InPktsBadTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 			      sum->InPktsUnknownSCI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 			      sum->InPktsNoSCI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 			      MACSEC_SECY_STATS_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 			      sum->InPktsOverrun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 			      MACSEC_SECY_STATS_ATTR_PAD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 							 MACSEC_ATTR_SECY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	u64 csid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	if (!secy_nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	switch (secy->key_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	case MACSEC_GCM_AES_128_SAK_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	case MACSEC_GCM_AES_256_SAK_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 		goto cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 			MACSEC_SECY_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 			      csid, MACSEC_SECY_ATTR_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		goto cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (secy->replay_protect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 			goto cancel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	nla_nest_end(skb, secy_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) cancel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	nla_nest_cancel(skb, secy_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) static noinline_for_stack int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) dump_secy(struct macsec_secy *secy, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	  struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	struct macsec_tx_sc_stats tx_sc_stats = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	struct macsec_tx_sa_stats tx_sa_stats = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	struct macsec_rx_sc_stats rx_sc_stats = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	struct macsec_rx_sa_stats rx_sa_stats = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 	struct macsec_dev *macsec = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 	struct macsec_dev_stats dev_stats = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 	struct nlattr *txsa_list, *rxsc_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	struct macsec_rx_sc *rx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	struct nlattr *attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	void *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 	if (!hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 	genl_dump_check_consistent(cb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	if (nla_put_secy(secy, skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	get_tx_sc_stats(dev, &tx_sc_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		nla_nest_cancel(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 	nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 	if (!attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 	get_secy_stats(dev, &dev_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 	if (copy_secy_stats(skb, &dev_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 		nla_nest_cancel(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 	nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 	if (!txsa_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 		struct nlattr *txsa_nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 		u64 pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 		int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 		if (!tx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		txsa_nest = nla_nest_start_noflag(skb, j++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 		if (!txsa_nest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			nla_nest_cancel(skb, txsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 		if (!attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 			nla_nest_cancel(skb, txsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 			nla_nest_cancel(skb, txsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 			nla_nest_cancel(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 			nla_nest_cancel(skb, txsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 			nla_nest_cancel(skb, txsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 		nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 		if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 			pn = tx_sa->next_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 			pn_len = MACSEC_XPN_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 			pn = tx_sa->next_pn_halves.lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 			pn_len = MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 			nla_nest_cancel(skb, txsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 			nla_nest_cancel(skb, txsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 		nla_nest_end(skb, txsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	nla_nest_end(skb, txsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	if (!rxsc_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	j = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 	for_each_rxsc_rtnl(secy, rx_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		int k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		struct nlattr *rxsa_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		if (!rxsc_nest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 			nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 				MACSEC_RXSC_ATTR_PAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 			nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 		if (!attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 			nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 			nla_nest_cancel(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 			nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 			nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) 		nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 		rxsa_list = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 						  MACSEC_RXSC_ATTR_SA_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 		if (!rxsa_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 			nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 			nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 			struct nlattr *rxsa_nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 			u64 pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 			int pn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 			if (!rx_sa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 			rxsa_nest = nla_nest_start_noflag(skb, k++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 			if (!rxsa_nest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 				nla_nest_cancel(skb, rxsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 				nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 				nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 				goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 			attr = nla_nest_start_noflag(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 						     MACSEC_SA_ATTR_STATS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 			if (!attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 				nla_nest_cancel(skb, rxsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 				nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 				nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 				goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 				nla_nest_cancel(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 				nla_nest_cancel(skb, rxsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 				nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 				nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 				goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 			nla_nest_end(skb, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 			if (secy->xpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 				pn = rx_sa->next_pn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 				pn_len = MACSEC_XPN_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 				pn = rx_sa->next_pn_halves.lower;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 				pn_len = MACSEC_DEFAULT_PN_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 				nla_nest_cancel(skb, rxsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 				nla_nest_cancel(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 				nla_nest_cancel(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 				goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 			nla_nest_end(skb, rxsa_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		nla_nest_end(skb, rxsa_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		nla_nest_end(skb, rxsc_nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	nla_nest_end(skb, rxsc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	genlmsg_end(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	genlmsg_cancel(skb, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) static int macsec_generation = 1; /* protected by RTNL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 	int dev_idx, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	dev_idx = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 	d = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 	rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 	cb->seq = macsec_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	for_each_netdev(net, dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 		if (d < dev_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 		if (!netif_is_macsec(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 			goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 		if (dump_secy(secy, dev, skb, cb) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		d++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	cb->args[0] = d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) static const struct genl_small_ops macsec_genl_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		.cmd = MACSEC_CMD_GET_TXSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 		.dumpit = macsec_dump_txsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		.cmd = MACSEC_CMD_ADD_RXSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 		.doit = macsec_add_rxsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		.cmd = MACSEC_CMD_DEL_RXSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		.doit = macsec_del_rxsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 		.cmd = MACSEC_CMD_UPD_RXSC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		.doit = macsec_upd_rxsc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		.cmd = MACSEC_CMD_ADD_TXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		.doit = macsec_add_txsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 		.cmd = MACSEC_CMD_DEL_TXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 		.doit = macsec_del_txsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		.cmd = MACSEC_CMD_UPD_TXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		.doit = macsec_upd_txsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 		.cmd = MACSEC_CMD_ADD_RXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 		.doit = macsec_add_rxsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 		.cmd = MACSEC_CMD_DEL_RXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 		.doit = macsec_del_rxsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 		.cmd = MACSEC_CMD_UPD_RXSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		.doit = macsec_upd_rxsa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		.cmd = MACSEC_CMD_UPD_OFFLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		.doit = macsec_upd_offload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		.flags = GENL_ADMIN_PERM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) static struct genl_family macsec_fam __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	.name		= MACSEC_GENL_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	.hdrsize	= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	.version	= MACSEC_GENL_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	.maxattr	= MACSEC_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 	.policy = macsec_genl_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	.netnsok	= true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	.module		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 	.small_ops	= macsec_genl_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 				     struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	struct macsec_dev *macsec = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	struct macsec_secy *secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 	struct pcpu_secy_stats *secy_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 	int ret, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 	if (macsec_is_offloaded(netdev_priv(dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		skb->dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 		return dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	/* 10.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	if (!secy->protect_frames) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		secy_stats = this_cpu_ptr(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		u64_stats_update_begin(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		secy_stats->stats.OutPktsUntagged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		u64_stats_update_end(&secy_stats->syncp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		skb->dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 		len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 		ret = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 		count_tx(dev, ret, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 	if (!secy->operational) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 		kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 		dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	skb = macsec_encrypt(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		if (PTR_ERR(skb) != -EINPROGRESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 			dev->stats.tx_dropped++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	macsec_encrypt_finish(skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	ret = dev_queue_xmit(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	count_tx(dev, ret, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) #define SW_MACSEC_FEATURES \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) /* If h/w offloading is enabled, use real device features save for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)  *   VLAN_FEATURES - they require additional ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)  *   HW_MACSEC - no reason to report it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) #define REAL_DEV_FEATURES(dev) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) static int macsec_dev_init(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	if (!dev->tstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	err = gro_cells_init(&macsec->gro_cells, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 		free_percpu(dev->tstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 		dev->features = REAL_DEV_FEATURES(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		dev->features = real_dev->features & SW_MACSEC_FEATURES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 	dev->needed_headroom = real_dev->needed_headroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 			       MACSEC_NEEDED_HEADROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	dev->needed_tailroom = real_dev->needed_tailroom +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 			       MACSEC_NEEDED_TAILROOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	if (is_zero_ether_addr(dev->dev_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		eth_hw_addr_inherit(dev, real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	if (is_zero_ether_addr(dev->broadcast))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) static void macsec_dev_uninit(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 	gro_cells_destroy(&macsec->gro_cells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 	free_percpu(dev->tstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) static netdev_features_t macsec_fix_features(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 					     netdev_features_t features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 	if (macsec_is_offloaded(macsec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		return REAL_DEV_FEATURES(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 	features &= (real_dev->features & SW_MACSEC_FEATURES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 	features |= NETIF_F_LLTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	return features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) static int macsec_dev_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	err = dev_uc_add(real_dev, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	if (dev->flags & IFF_ALLMULTI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 		err = dev_set_allmulti(real_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 			goto del_unicast;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	if (dev->flags & IFF_PROMISC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 		err = dev_set_promiscuity(real_dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 			goto clear_allmulti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 			err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 			goto clear_allmulti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 		ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 		err = macsec_offload(ops->mdo_dev_open, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 			goto clear_allmulti;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 	if (netif_carrier_ok(real_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		netif_carrier_on(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) clear_allmulti:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 	if (dev->flags & IFF_ALLMULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 		dev_set_allmulti(real_dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) del_unicast:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 	dev_uc_del(real_dev, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) static int macsec_dev_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 			ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 			macsec_offload(ops->mdo_dev_stop, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 	dev_mc_unsync(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 	dev_uc_unsync(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	if (dev->flags & IFF_ALLMULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 		dev_set_allmulti(real_dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	if (dev->flags & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		dev_set_promiscuity(real_dev, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	dev_uc_del(real_dev, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	if (!(dev->flags & IFF_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	if (change & IFF_ALLMULTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 	if (change & IFF_PROMISC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 		dev_set_promiscuity(real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 				    dev->flags & IFF_PROMISC ? 1 : -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) static void macsec_dev_set_rx_mode(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) 	struct net_device *real_dev = macsec_priv(dev)->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 	dev_mc_sync(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	dev_uc_sync(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) static int macsec_set_mac_address(struct net_device *dev, void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	struct sockaddr *addr = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 	if (!is_valid_ether_addr(addr->sa_data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 		return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	if (!(dev->flags & IFF_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	err = dev_uc_add(real_dev, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 	dev_uc_del(real_dev, dev->dev_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 	ether_addr_copy(dev->dev_addr, addr->sa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 			ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 			macsec_offload(ops->mdo_upd_secy, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) static int macsec_change_mtu(struct net_device *dev, int new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	if (macsec->real_dev->mtu - extra < new_mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 		return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	dev->mtu = new_mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) static void macsec_get_stats64(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) 			       struct rtnl_link_stats64 *s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	if (!dev->tstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	dev_fetch_sw_netstats(s, dev->tstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	s->rx_dropped = dev->stats.rx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 	s->tx_dropped = dev->stats.tx_dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) static int macsec_get_iflink(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	return macsec_priv(dev)->real_dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) static const struct net_device_ops macsec_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 	.ndo_init		= macsec_dev_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 	.ndo_uninit		= macsec_dev_uninit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 	.ndo_open		= macsec_dev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	.ndo_stop		= macsec_dev_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	.ndo_fix_features	= macsec_fix_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	.ndo_change_mtu		= macsec_change_mtu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	.ndo_set_mac_address	= macsec_set_mac_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	.ndo_start_xmit		= macsec_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	.ndo_get_stats64	= macsec_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	.ndo_get_iflink		= macsec_get_iflink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) static const struct device_type macsec_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	.name = "macsec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) 	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) 	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) 	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) static void macsec_free_netdev(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 	free_percpu(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 	free_percpu(macsec->secy.tx_sc.stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) static void macsec_setup(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) 	ether_setup(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) 	dev->min_mtu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	dev->max_mtu = ETH_MAX_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	dev->priv_flags |= IFF_NO_QUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	dev->netdev_ops = &macsec_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 	dev->needs_free_netdev = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	dev->priv_destructor = macsec_free_netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	SET_NETDEV_DEVTYPE(dev, &macsec_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	eth_zero_addr(dev->broadcast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) static int macsec_changelink_common(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 				    struct nlattr *data[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 	struct macsec_secy *secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 	struct macsec_tx_sc *tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 	secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 	tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 	if (data[IFLA_MACSEC_ENCODING_SA]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 		struct macsec_tx_sa *tx_sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 		secy->operational = tx_sa && tx_sa->active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	if (data[IFLA_MACSEC_WINDOW])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	if (data[IFLA_MACSEC_ENCRYPT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 	if (data[IFLA_MACSEC_PROTECT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 	if (data[IFLA_MACSEC_INC_SCI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 	if (data[IFLA_MACSEC_ES])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 	if (data[IFLA_MACSEC_SCB])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) 		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	if (data[IFLA_MACSEC_REPLAY_PROTECT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) 		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) 	if (data[IFLA_MACSEC_VALIDATION])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) 		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) 	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) 		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) 		case MACSEC_CIPHER_ID_GCM_AES_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) 		case MACSEC_DEFAULT_CIPHER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) 			secy->xpn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) 		case MACSEC_CIPHER_ID_GCM_AES_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 			secy->xpn = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 			secy->xpn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 			secy->xpn = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 			     struct nlattr *data[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 			     struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	struct macsec_tx_sc tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 	struct macsec_secy secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	    data[IFLA_MACSEC_ICV_LEN] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	    data[IFLA_MACSEC_SCI] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 	    data[IFLA_MACSEC_PORT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 	/* Keep a copy of unmodified secy and tx_sc, in case the offload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 	 * propagation fails, to revert macsec_changelink_common.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 	memcpy(&secy, &macsec->secy, sizeof(secy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 	ret = macsec_changelink_common(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		if (!ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 			ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 		ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 			goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 	memcpy(&macsec->secy, &secy, sizeof(secy));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) static void macsec_del_dev(struct macsec_dev *macsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 	while (macsec->secy.rx_sc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 		free_rx_sc(rx_sc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 	for (i = 0; i < MACSEC_NUM_AN; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 		if (sa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 			clear_tx_sa(sa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		ops = macsec_get_ops(netdev_priv(dev), &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 			ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 			macsec_offload(ops->mdo_del_secy, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 	unregister_netdevice_queue(dev, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 	list_del_rcu(&macsec->secys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 	macsec_del_dev(macsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 	netdev_upper_dev_unlink(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 	macsec_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) static void macsec_dellink(struct net_device *dev, struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	struct net_device *real_dev = macsec->real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 	macsec_common_dellink(dev, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 	if (list_empty(&rxd->secys)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		netdev_rx_handler_unregister(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 		kfree(rxd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) static int register_macsec_dev(struct net_device *real_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) 			       struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) 	if (!rxd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) 		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 		if (!rxd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 		INIT_LIST_HEAD(&rxd->secys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 						 rxd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 		if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 			kfree(rxd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) static bool sci_exists(struct net_device *dev, sci_t sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 	struct macsec_dev *macsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 	list_for_each_entry(macsec, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 		if (macsec->secy.sci == sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 	struct macsec_secy *secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 	if (!macsec->stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 	if (!secy->tx_sc.stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 		free_percpu(macsec->stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	if (sci == MACSEC_UNDEF_SCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 	secy->netdev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 	secy->operational = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 	secy->key_len = DEFAULT_SAK_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 	secy->icv_len = icv_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 	secy->protect_frames = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 	secy->replay_protect = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 	secy->xpn = DEFAULT_XPN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	secy->sci = sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 	secy->tx_sc.active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 	secy->tx_sc.end_station = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	secy->tx_sc.scb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) static struct lock_class_key macsec_netdev_addr_lock_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) static int macsec_newlink(struct net *net, struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) 			  struct nlattr *tb[], struct nlattr *data[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) 			  struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) 	struct macsec_dev *macsec = macsec_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) 	rx_handler_func_t *rx_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) 	u8 icv_len = DEFAULT_ICV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	struct net_device *real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 	int err, mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	sci_t sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 	if (!tb[IFLA_LINK])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 	if (!real_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 	if (real_dev->type != ARPHRD_ETHER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 	dev->priv_flags |= IFF_MACSEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 	macsec->real_dev = real_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	if (data && data[IFLA_MACSEC_OFFLOAD])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 		/* MACsec offloading is off by default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 		macsec->offload = MACSEC_OFFLOAD_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 	/* Check if the offloading mode is supported by the underlying layers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 	    !macsec_check_offload(macsec->offload, macsec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 	/* send_sci must be set to true when transmit sci explicitly is set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 	if ((data && data[IFLA_MACSEC_SCI]) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 	    (data && data[IFLA_MACSEC_INC_SCI])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 		if (!send_sci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	if (data && data[IFLA_MACSEC_ICV_LEN])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) 	if (mtu < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) 		dev->mtu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) 		dev->mtu = mtu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) 	rx_handler = rtnl_dereference(real_dev->rx_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) 	if (rx_handler && rx_handler != macsec_handle_frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) 	err = register_netdevice(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	netdev_lockdep_set_classes(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	lockdep_set_class(&dev->addr_list_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 			  &macsec_netdev_addr_lock_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 	err = netdev_upper_dev_link(real_dev, dev, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 		goto unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 	/* need to be already registered so that ->init has run and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 	 * the MAC addr is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	if (data && data[IFLA_MACSEC_SCI])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	else if (data && data[IFLA_MACSEC_PORT])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) 		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) 		sci = dev_to_sci(dev, MACSEC_PORT_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) 	if (rx_handler && sci_exists(real_dev, sci)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) 		err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) 		goto unlink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) 	err = macsec_add_dev(dev, sci, icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 		goto unlink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 		err = macsec_changelink_common(dev, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 			goto del_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 	/* If h/w offloading is available, propagate to the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	if (macsec_is_offloaded(macsec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 		const struct macsec_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 		struct macsec_context ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 		ops = macsec_get_ops(macsec, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) 		if (ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 			ctx.secy = &macsec->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 			err = macsec_offload(ops->mdo_add_secy, &ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 				goto del_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 	err = register_macsec_dev(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 		goto del_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 	netif_stacked_transfer_operstate(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 	linkwatch_fire_event(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 	macsec_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) del_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 	macsec_del_dev(macsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) unlink:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 	netdev_upper_dev_unlink(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 	unregister_netdevice(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 				struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	u8 icv_len = DEFAULT_ICV_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 	int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 	bool es, scb, sci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 	if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 	if (data[IFLA_MACSEC_CIPHER_SUITE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 	if (data[IFLA_MACSEC_ICV_LEN]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 		if (icv_len != DEFAULT_ICV_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 			struct crypto_aead *dummy_tfm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 			dummy_tfm = macsec_alloc_tfm(dummy_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 						     DEFAULT_SAK_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 						     icv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 			if (IS_ERR(dummy_tfm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 				return PTR_ERR(dummy_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 			crypto_free_aead(dummy_tfm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 	switch (csid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	case MACSEC_CIPHER_ID_GCM_AES_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 	case MACSEC_CIPHER_ID_GCM_AES_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 	case MACSEC_DEFAULT_CIPHER_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 		if (icv_len < MACSEC_MIN_ICV_LEN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 		    icv_len > MACSEC_STD_ICV_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	if (data[IFLA_MACSEC_ENCODING_SA]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) 	     flag < IFLA_MACSEC_VALIDATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 	     flag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) 		if (data[flag]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) 			if (nla_get_u8(data[flag]) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 	if ((sci && (scb || es)) || (scb && es))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	if (data[IFLA_MACSEC_VALIDATION] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	    !data[IFLA_MACSEC_WINDOW])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) static struct net *macsec_get_link_net(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 	return dev_net(macsec_priv(dev)->real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) static size_t macsec_get_size(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		nla_total_size(1) + /* IFLA_MACSEC_ES */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		nla_total_size(1) + /* IFLA_MACSEC_SCB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 		0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) static int macsec_fill_info(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 			    const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 	struct macsec_secy *secy = &macsec_priv(dev)->secy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	u64 csid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	switch (secy->key_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	case MACSEC_GCM_AES_128_SAK_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	case MACSEC_GCM_AES_256_SAK_LEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 			IFLA_MACSEC_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 			      csid, IFLA_MACSEC_PAD) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	    0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 		goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	if (secy->replay_protect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) 			goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) static struct rtnl_link_ops macsec_link_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	.kind		= "macsec",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	.priv_size	= sizeof(struct macsec_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 	.maxtype	= IFLA_MACSEC_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 	.policy		= macsec_rtnl_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 	.setup		= macsec_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	.validate	= macsec_validate_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 	.newlink	= macsec_newlink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	.changelink	= macsec_changelink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 	.dellink	= macsec_dellink,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	.get_size	= macsec_get_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 	.fill_info	= macsec_fill_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	.get_link_net	= macsec_get_link_net,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) static bool is_macsec_master(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) 	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) static int macsec_notify(struct notifier_block *this, unsigned long event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 			 void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 	LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	if (!is_macsec_master(real_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	switch (event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 	case NETDEV_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 	case NETDEV_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	case NETDEV_CHANGE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		struct macsec_dev *m, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 		struct macsec_rxh_data *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 			struct net_device *dev = m->secy.netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 			netif_stacked_transfer_operstate(real_dev, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	case NETDEV_UNREGISTER: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 		struct macsec_dev *m, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 		struct macsec_rxh_data *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 		rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 			macsec_common_dellink(m->secy.netdev, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 		netdev_rx_handler_unregister(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) 		kfree(rxd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 		unregister_netdevice_many(&head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 	case NETDEV_CHANGEMTU: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 		struct macsec_dev *m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 		struct macsec_rxh_data *rxd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 		rxd = macsec_data_rtnl(real_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 		list_for_each_entry(m, &rxd->secys, secys) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 			struct net_device *dev = m->secy.netdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 							    macsec_extra_len(true));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 			if (dev->mtu > mtu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 				dev_set_mtu(dev, mtu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) static struct notifier_block macsec_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	.notifier_call = macsec_notify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) static int __init macsec_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	pr_info("MACsec IEEE 802.1AE\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	err = register_netdevice_notifier(&macsec_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 	err = rtnl_link_register(&macsec_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 		goto notifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 	err = genl_register_family(&macsec_fam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		goto rtnl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) rtnl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 	rtnl_link_unregister(&macsec_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) notifier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 	unregister_netdevice_notifier(&macsec_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) static void __exit macsec_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 	genl_unregister_family(&macsec_fam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 	rtnl_link_unregister(&macsec_link_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 	unregister_netdevice_notifier(&macsec_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 	rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) module_init(macsec_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) module_exit(macsec_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) MODULE_ALIAS_RTNL_LINK("macsec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) MODULE_ALIAS_GENL_FAMILY("macsec");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) MODULE_LICENSE("GPL v2");