Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards
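The listing below is from the ThunderboltIP peer-to-peer networking driver (drivers/net/thunderbolt.c in the mainline 5.10 tree; the path in this tree is assumed to match), truncated partway through tbnet_poll().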

// SPDX-License-Identifier: GPL-2.0
/*
 * Networking over Thunderbolt cable using Apple ThunderboltIP protocol
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Amir Levy <amir.jer.levy@intel.com>
 *          Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

#include <net/ip6_checksum.h>

/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY	4500
#define TBNET_LOGIN_TIMEOUT	500
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
#define TBNET_MAX_MTU		SZ_64K
#define TBNET_FRAME_SIZE	SZ_4K
#define TBNET_MAX_PAYLOAD_SIZE	\
	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
/* Rx packets need to hold space for skb_shared_info */
#define TBNET_RX_MAX_SIZE	\
	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
#define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)

#define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))

/**
 * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
 * @frame_size: size of the data in the frame
 * @frame_index: running index on the frames
 * @frame_id: ID of the frame to match frames to a specific packet
 * @frame_count: how many frames assemble a full packet
 *
 * Each data frame passed to the high-speed DMA ring has this header. If
 * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
 * supported then @frame_id is filled, otherwise it stays %0.
 */
struct thunderbolt_ip_frame_header {
	u32 frame_size;
	u16 frame_index;
	u16 frame_id;
	u32 frame_count;
};

enum thunderbolt_ip_frame_pdf {
	TBIP_PDF_FRAME_START = 1,
	TBIP_PDF_FRAME_END,
};

enum thunderbolt_ip_type {
	TBIP_LOGIN,
	TBIP_LOGIN_RESPONSE,
	TBIP_LOGOUT,
	TBIP_STATUS,
};

struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};

#define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
#define TBIP_HDR_SN_MASK		GENMASK(28, 27)
#define TBIP_HDR_SN_SHIFT		27

struct thunderbolt_ip_login {
	struct thunderbolt_ip_header hdr;
	u32 proto_version;
	u32 transmit_path;
	u32 reserved[4];
};

#define TBIP_LOGIN_PROTO_VERSION	1

struct thunderbolt_ip_login_response {
	struct thunderbolt_ip_header hdr;
	u32 status;
	u32 receiver_mac[2];
	u32 receiver_mac_len;
	u32 reserved[4];
};

struct thunderbolt_ip_logout {
	struct thunderbolt_ip_header hdr;
};

struct thunderbolt_ip_status {
	struct thunderbolt_ip_header hdr;
	u32 status;
};

struct tbnet_stats {
	u64 tx_packets;
	u64 rx_packets;
	u64 tx_bytes;
	u64 rx_bytes;
	u64 rx_errors;
	u64 tx_errors;
	u64 rx_length_errors;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_missed_errors;
};

struct tbnet_frame {
	struct net_device *dev;
	struct page *page;
	struct ring_frame frame;
};

struct tbnet_ring {
	struct tbnet_frame frames[TBNET_RING_SIZE];
	unsigned int cons;
	unsigned int prod;
	struct tb_ring *ring;
};

/**
 * struct tbnet - ThunderboltIP network driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @handler: ThunderboltIP configuration protocol handler
 * @dev: Networking device
 * @napi: NAPI structure for Rx polling
 * @stats: Network statistics
 * @skb: Network packet that is currently processed on Rx path
 * @command_id: ID used for next configuration protocol packet
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @transmit_path: HopID the other end needs to use when building the
 *		   opposite side path.
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @transmit_path.
 * @login_retries: Number of login retries currently done
 * @login_work: Worker to send ThunderboltIP login packets
 * @connected_work: Worker that finalizes the ThunderboltIP connection
 *		    setup and enables DMA paths for high speed data
 *		    transfers
 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
 *		     connection
 * @rx_hdr: Copy of the header of the currently processed Rx frame. Used
 *	    when a network packet consists of multiple Thunderbolt frames.
 *	    In host byte order.
 * @rx_ring: Software ring holding Rx frames
 * @frame_id: Frame ID used for the next Tx packet
 *            (if %TBNET_MATCH_FRAGS_ID is supported by both ends)
 * @tx_ring: Software ring holding Tx frames
 */
struct tbnet {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_protocol_handler handler;
	struct net_device *dev;
	struct napi_struct napi;
	struct tbnet_stats stats;
	struct sk_buff *skb;
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
	struct work_struct connected_work;
	struct work_struct disconnect_work;
	struct thunderbolt_ip_frame_header rx_hdr;
	struct tbnet_ring rx_ring;
	atomic_t frame_id;
	struct tbnet_ring tx_ring;
};

/* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
static const uuid_t tbnet_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

/* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
static const uuid_t tbnet_svc_uuid =
	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);

static struct tb_property_dir *tbnet_dir;

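/*
 * Fill in the common ThunderboltIP control packet header. The length
 * stored in length_sn is in dwords and excludes the route_hi/lo and
 * length_sn fields themselves; the 2-bit sequence number is packed into
 * the same word via TBIP_HDR_SN_SHIFT/TBIP_HDR_SN_MASK.
 */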
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
	enum thunderbolt_ip_type type, size_t size, u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}

static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
				u32 command_id)
{
	struct thunderbolt_ip_login_response reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
	reply.receiver_mac_len = ETH_ALEN;

	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

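/*
 * Send a ThunderboltIP login request to the remote host, advertising
 * TBNET_LOCAL_PATH as the HopID the other end should transmit on, and
 * wait up to TBNET_LOGIN_TIMEOUT ms for the matching response.
 */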
static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
	struct thunderbolt_ip_login_response reply;
	struct thunderbolt_ip_login request;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
}

static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
				 u32 command_id)
{
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&reply, 0, sizeof(reply));
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
}

static int tbnet_logout_request(struct tbnet *net)
{
	struct thunderbolt_ip_logout request;
	struct thunderbolt_ip_status reply;
	struct tb_xdomain *xd = net->xd;

	memset(&request, 0, sizeof(request));
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
}

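/* Reset the login handshake state and schedule the first login attempt
 * about a second from now.
 */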
static void start_login(struct tbnet *net)
{
	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);
}

static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
{
	return tf->frame.size ? : TBNET_FRAME_SIZE;
}

static void tbnet_free_buffers(struct tbnet_ring *ring)
{
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		struct tbnet_frame *tf = &ring->frames[i];
		enum dma_data_direction dir;
		unsigned int order;
		size_t size;

		if (!tf->page)
			continue;

		if (ring->ring->is_tx) {
			dir = DMA_TO_DEVICE;
			order = 0;
			size = TBNET_FRAME_SIZE;
		} else {
			dir = DMA_FROM_DEVICE;
			order = TBNET_RX_PAGE_ORDER;
			size = TBNET_RX_PAGE_SIZE;
		}

		if (tf->frame.buffer_phy)
			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
				       dir);

		__free_pages(tf->page, order);
		tf->page = NULL;
	}

	ring->cons = 0;
	ring->prod = 0;
}

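/*
 * Bring the connection down: stop the queue and any pending login work,
 * optionally send a logout to the remote host, then stop the DMA rings,
 * free the ring buffers and disable the XDomain DMA paths.
 */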
static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);

	stop_login(net);

	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}

		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
			netdev_warn(net->dev, "failed to disable DMA paths\n");
	}

	net->login_retries = 0;
	net->login_sent = false;
	net->login_received = false;

	mutex_unlock(&net->connection_lock);
}

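/*
 * XDomain protocol handler callback for incoming ThunderboltIP control
 * packets. Packets not addressed to this service are left for others
 * (return 0); login and logout requests are answered and the matching
 * workers scheduled. Returns 1 once the packet has been consumed.
 */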
static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
	const struct thunderbolt_ip_login *pkg = buf;
	struct tbnet *net = data;
	u32 command_id;
	int ret = 0;
	u32 sequence;
	u64 route;

	/* Make sure the packet is for us */
	if (size < sizeof(struct thunderbolt_ip_header))
		return 0;
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
		return 0;

	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
	route &= ~BIT_ULL(63);
	if (route != net->xd->route)
		return 0;

	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
	sequence >>= TBIP_HDR_SN_SHIFT;
	command_id = pkg->hdr.command_id;

	switch (pkg->hdr.type) {
	case TBIP_LOGIN:
		if (!netif_running(net->dev))
			break;

		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;

			/* If we have reached the maximum number of login
			 * retries or the other end logged out previously,
			 * schedule another round of login retries.
			 */
			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
			    !net->login_sent) {
				net->login_retries = 0;
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
		}
		break;

	case TBIP_LOGOUT:
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret)
			queue_work(system_long_wq, &net->disconnect_work);
		break;

	default:
		return 0;
	}

	if (ret)
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

	return 1;
}

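/* Producer/consumer distance; stays correct across unsigned wrap-around. */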
static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
{
	return ring->prod - ring->cons;
}

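/*
 * Allocate up to nbuffers high-order pages, map them for DMA and post
 * them to the Rx ring. Stops early if the next slot is still occupied.
 */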
static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
{
	struct tbnet_ring *ring = &net->rx_ring;
	int ret;

	while (nbuffers--) {
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
		struct tbnet_frame *tf = &ring->frames[index];
		dma_addr_t dma_addr;

		if (tf->page)
			break;

		/* Allocate page (order > 0) so that it can hold maximum
		 * ThunderboltIP frame (4kB) and the additional room for
		 * SKB shared info required by build_skb().
		 */
		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
		if (!tf->page) {
			ret = -ENOMEM;
			goto err_free;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0,
					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			ret = -ENOMEM;
			goto err_free;
		}

		tf->frame.buffer_phy = dma_addr;
		tf->dev = net->dev;

		tb_ring_rx(ring->ring, &tf->frame);

		ring->prod++;
	}

	return 0;

err_free:
	tbnet_free_buffers(ring);
	return ret;
}

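/*
 * Take the next free Tx frame from the software ring and hand its
 * buffer back to the CPU so it can be filled.
 */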
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	struct tbnet_frame *tf;
	unsigned int index;

	if (!tbnet_available_buffers(ring))
		return NULL;

	index = ring->cons++ & (TBNET_RING_SIZE - 1);

	tf = &ring->frames[index];
	tf->frame.size = 0;

	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);

	return tf;
}

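/*
 * Completion callback invoked by the Thunderbolt ring code once a Tx
 * frame has been sent; returns the buffer to the software ring and
 * wakes the queue when enough buffers are free again.
 */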
static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			      bool canceled)
{
	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
	struct tbnet *net = netdev_priv(tf->dev);

	/* Return buffer to the ring */
	net->tx_ring.prod++;

	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
}

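/*
 * Allocate and DMA-map one page per Tx frame up front. The mappings
 * stay in place for the lifetime of the connection and are torn down
 * by tbnet_free_buffers().
 */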
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
	struct tbnet_ring *ring = &net->tx_ring;
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
	unsigned int i;

	for (i = 0; i < TBNET_RING_SIZE; i++) {
		struct tbnet_frame *tf = &ring->frames[i];
		dma_addr_t dma_addr;

		tf->page = alloc_page(GFP_KERNEL);
		if (!tf->page) {
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, dma_addr)) {
			__free_page(tf->page);
			tf->page = NULL;
			tbnet_free_buffers(ring);
			return -ENOMEM;
		}

		tf->dev = net->dev;
		tf->frame.buffer_phy = dma_addr;
		tf->frame.callback = tbnet_tx_callback;
		tf->frame.sof = TBIP_PDF_FRAME_START;
		tf->frame.eof = TBIP_PDF_FRAME_END;
	}

	ring->cons = 0;
	ring->prod = TBNET_RING_SIZE - 1;

	return 0;
}

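/*
 * Runs once both ends have exchanged logins: enables the XDomain DMA
 * paths, starts the Tx/Rx rings and allocates their buffers, and only
 * then reports carrier up and starts the netdev queue.
 */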
static void tbnet_connected_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), connected_work);
	bool connected;
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	mutex_lock(&net->connection_lock);
	connected = net->login_sent && net->login_received;
	mutex_unlock(&net->connection_lock);

	if (!connected)
		return;

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
		return;
	}

	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);

	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
	if (ret)
		goto err_stop_rings;

	ret = tbnet_alloc_tx_buffers(net);
	if (ret)
		goto err_free_rx_buffers;

	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);
	return;

err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
}

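/*
 * Login worker. Retries the login request every TBNET_LOGIN_DELAY ms,
 * up to TBNET_LOGIN_RETRIES attempts, cycling the 2-bit sequence
 * number; on success it marks login_sent and kicks connected_work.
 */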
static void tbnet_login_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
	int ret;

	if (netif_carrier_ok(net->dev))
		return;

	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
		}
	} else {
		net->login_retries = 0;

		mutex_lock(&net->connection_lock);
		net->login_sent = true;
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
	}
}

static void tbnet_disconnect_work(struct work_struct *work)
{
	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

	tbnet_tear_down(net, false);
}

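/*
 * Validate a received frame header. The first fragment of a packet must
 * have index zero and a sane frame count; later fragments must match
 * the first fragment's count and ID and increment the index by one,
 * otherwise the caller drops the whole packet being assembled.
 */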
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
			      const struct thunderbolt_ip_frame_header *hdr)
{
	u32 frame_id, frame_count, frame_size, frame_index;
	unsigned int size;

	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
		net->stats.rx_crc_errors++;
		return false;
	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
		net->stats.rx_over_errors++;
		return false;
	}

	/* Size should be greater than just the header, i.e. the frame
	 * carries data.
	 */
	size = tbnet_frame_size(tf);
	if (size <= sizeof(*hdr)) {
		net->stats.rx_length_errors++;
		return false;
	}

	frame_count = le32_to_cpu(hdr->frame_count);
	frame_size = le32_to_cpu(hdr->frame_size);
	frame_index = le16_to_cpu(hdr->frame_index);
	frame_id = le16_to_cpu(hdr->frame_id);

	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
		net->stats.rx_length_errors++;
		return false;
	}

	/* In case we're in the middle of a packet, validate the frame
	 * header based on the first fragment of the packet.
	 */
	if (net->skb && net->rx_hdr.frame_count) {
		/* Check the frame count matches the first fragment's count */
		if (frame_count != net->rx_hdr.frame_count) {
			net->stats.rx_length_errors++;
			return false;
		}

		/* Check the frame index is incremented correctly and the
		 * frame ID matches.
		 */
		if (frame_index != net->rx_hdr.frame_index + 1 ||
		    frame_id != net->rx_hdr.frame_id) {
			net->stats.rx_missed_errors++;
			return false;
		}

		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
			net->stats.rx_length_errors++;
			return false;
		}

		return true;
	}

	/* Start of packet, validate the frame header */
	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
		net->stats.rx_length_errors++;
		return false;
	}
	if (frame_index != 0) {
		net->stats.rx_missed_errors++;
		return false;
	}

	return true;
}

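/*
 * NAPI poll routine. Reassembles ThunderboltIP fragments into an skb
 * (first fragment via build_skb(), later ones as page fragments) and
 * hands completed packets to the stack with napi_gro_receive().
 */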
static int tbnet_poll(struct napi_struct *napi, int budget)
{
	struct tbnet *net = container_of(napi, struct tbnet, napi);
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
	unsigned int rx_packets = 0;

	while (rx_packets < budget) {
		const struct thunderbolt_ip_frame_header *hdr;
		unsigned int hdr_size = sizeof(*hdr);
		struct sk_buff *skb = NULL;
		struct ring_frame *frame;
		struct tbnet_frame *tf;
		struct page *page;
		bool last = true;
		u32 frame_size;

		/* Return some buffers to hardware; returning one at a time
		 * is too slow, so allocate MAX_SKB_FRAGS buffers at the same
		 * time.
		 */
		if (cleaned_count >= MAX_SKB_FRAGS) {
			tbnet_alloc_rx_buffers(net, cleaned_count);
			cleaned_count = 0;
		}

		frame = tb_ring_poll(net->rx_ring.ring);
		if (!frame)
			break;

		dma_unmap_page(dma_dev, frame->buffer_phy,
			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);

		tf = container_of(frame, typeof(*tf), frame);

		page = tf->page;
		tf->page = NULL;
		net->rx_ring.cons++;
		cleaned_count++;

		hdr = page_address(page);
		if (!tbnet_check_frame(net, tf, hdr)) {
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
			net->skb = NULL;
			continue;
		}

		frame_size = le32_to_cpu(hdr->frame_size);

		skb = net->skb;
		if (!skb) {
			skb = build_skb(page_address(page),
					TBNET_RX_PAGE_SIZE);
			if (!skb) {
				__free_pages(page, TBNET_RX_PAGE_ORDER);
				net->stats.rx_errors++;
				break;
			}

			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);

			net->skb = skb;
		} else {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
		}

		net->rx_hdr.frame_size = frame_size;
		net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count);
		net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index);
		net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id);
		last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1;

		rx_packets++;
		net->stats.rx_bytes += frame_size;

		if (last) {
			skb->protocol = eth_type_trans(skb, net->dev);
			napi_gro_receive(&net->napi, skb);
			net->skb = NULL;
		}
	}

	net->stats.rx_packets += rx_packets;

	if (cleaned_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		tbnet_alloc_rx_buffers(net, cleaned_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (rx_packets >= budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		return budget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	napi_complete_done(napi, rx_packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/* Re-enable the ring interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	tb_ring_poll_complete(net->rx_ring.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	return rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
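/* Rx ring callback: defer frame processing to NAPI. */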
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) static void tbnet_start_poll(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct tbnet *net = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	napi_schedule(&net->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
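/*
 * ndo_open: allocate frame-mode Tx and Rx rings on the host controller,
 * program the Rx ring with the ThunderboltIP start/end-of-frame PDF
 * masks, enable NAPI and kick off the ThunderboltIP login handshake.
 * The carrier stays down until login with the remote host succeeds.
 */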
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) static int tbnet_open(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct tbnet *net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct tb_xdomain *xd = net->xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	u16 sof_mask, eof_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	struct tb_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	netif_carrier_off(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				RING_FLAG_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	if (!ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		netdev_err(dev, "failed to allocate Tx ring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	net->tx_ring.ring = ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	sof_mask = BIT(TBIP_PDF_FRAME_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	eof_mask = BIT(TBIP_PDF_FRAME_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 				RING_FLAG_FRAME, sof_mask, eof_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				tbnet_start_poll, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	if (!ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		netdev_err(dev, "failed to allocate Rx ring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		tb_ring_free(net->tx_ring.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		net->tx_ring.ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	net->rx_ring.ring = ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	napi_enable(&net->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	start_login(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
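/*
 * ndo_stop: undo tbnet_open().  NAPI is disabled, pending disconnect
 * work is flushed, the ThunderboltIP connection is torn down and both
 * rings are freed.
 */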
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static int tbnet_stop(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	struct tbnet *net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	napi_disable(&net->napi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	cancel_work_sync(&net->disconnect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	tbnet_tear_down(net, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	tb_ring_free(net->rx_ring.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	net->rx_ring.ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	tb_ring_free(net->tx_ring.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	net->tx_ring.ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
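/*
 * Finalize the Tx frames of one packet before they are queued on the
 * ring.  The total frame count is written into every frame header and,
 * since the data was copied out of the skb, checksum offload is emulated
 * here: for CHECKSUM_PARTIAL skbs the IPv4 header checksum is refreshed,
 * the TCP/UDP pseudo-header checksum is seeded and the payload checksum
 * is accumulated frame by frame with csum_partial() before the buffers
 * are synced for DMA.  Returns false for packets it cannot handle, in
 * which case the caller drops them.
 */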
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct tbnet_frame **frames, u32 frame_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	__wsum wsum = htonl(skb->len - skb_transport_offset(skb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	unsigned int i, len, offset = skb_transport_offset(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	__be16 protocol = skb->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	void *data = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	void *dest = hdr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	__sum16 *tucso;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		/* No checksum needs to be calculated, so just update the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		 * total frame count and sync the frames for DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		for (i = 0; i < frame_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			hdr = page_address(frames[i]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			hdr->frame_count = cpu_to_le32(frame_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			dma_sync_single_for_device(dma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				frames[i]->frame.buffer_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	if (protocol == htons(ETH_P_8021Q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		struct vlan_hdr *vhdr, vh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		if (!vhdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		protocol = vhdr->h_vlan_encapsulated_proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	/* Data points to the beginning of the packet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * Find the absolute position of the checksum field within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * packet: ipcso is used to update the IP header checksum and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * tucso to update the TCP/UDP checksum.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (protocol == htons(ETH_P_IP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		*ipcso = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				      ip_hdr(skb)->ihl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 					    ip_hdr(skb)->daddr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					    ip_hdr(skb)->protocol, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	} else if (skb_is_gso_v6(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 					  &ipv6_hdr(skb)->daddr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 					  IPPROTO_TCP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	} else if (protocol == htons(ETH_P_IPV6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 					  &ipv6_hdr(skb)->daddr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 					  ipv6_hdr(skb)->nexthdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	/* The first frame starts with the packet headers; checksumming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	 * begins at the transport offset and runs across all frames.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	for (i = 0; i < frame_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		hdr = page_address(frames[i]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		dest = (void *)(hdr + 1) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		len = le32_to_cpu(hdr->frame_size) - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		wsum = csum_partial(dest, len, wsum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		hdr->frame_count = cpu_to_le32(frame_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	*tucso = csum_fold(wsum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* Checksum is finally calculated and we don't touch the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	 * anymore, so DMA sync the frames now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	for (i = 0; i < frame_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
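/* Map one page fragment of the skb; the caller kunmap_atomic()s it. */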
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			     unsigned int *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	*len = skb_frag_size(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
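/*
 * ndo_start_xmit: copy the skb (linear part plus page fragments) into
 * ThunderboltIP frames.  Each frame carries a thunderbolt_ip_frame_header
 * followed by at most TBNET_MAX_PAYLOAD_SIZE bytes of packet data, so a
 * maximum-MTU packet (65522 bytes plus the 14-byte Ethernet header) is
 * split into DIV_ROUND_UP(65536, 4084) = 17 frames.  If the Tx ring does
 * not have enough free buffers, the queue is stopped and NETDEV_TX_BUSY
 * is returned so that the stack retries the packet later.
 */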
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 				    struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct tbnet *net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	struct tbnet_frame *frames[MAX_SKB_FRAGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	u16 frame_id = atomic_read(&net->frame_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	struct thunderbolt_ip_frame_header *hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	unsigned int len = skb_headlen(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	unsigned int data_len = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	unsigned int nframes, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	unsigned int frag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	void *src = skb->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	u32 frame_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	bool unmap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	void *dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		netif_stop_queue(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		return NETDEV_TX_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	frames[frame_index] = tbnet_get_tx_buffer(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (!frames[frame_index])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		goto err_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	hdr = page_address(frames[frame_index]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	dest = hdr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	/* Emit full frames while the data exceeds one frame's payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		hdr->frame_index = cpu_to_le16(frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		hdr->frame_id = cpu_to_le16(frame_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			if (len > size_left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				/* Copy enough data to fill this Tx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				 * frame completely, then break and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				 * continue with the next frame.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				memcpy(dest, src, size_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				len -= size_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 				dest += size_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				src += size_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			memcpy(dest, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			size_left -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			dest += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			if (unmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 				kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 				unmap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			/* Move on to the next fragment if one remains */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			if (frag < skb_shinfo(skb)->nr_frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 				/* Map it; it is unmapped after copying */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				src = tbnet_kmap_frag(skb, frag++, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				unmap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			} else if (unlikely(size_left > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				goto err_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		} while (size_left > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		data_len -= TBNET_MAX_PAYLOAD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		frame_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		frames[frame_index] = tbnet_get_tx_buffer(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		if (!frames[frame_index])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			goto err_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		hdr = page_address(frames[frame_index]->page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		dest = hdr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	hdr->frame_size = cpu_to_le32(data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	hdr->frame_index = cpu_to_le16(frame_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	hdr->frame_id = cpu_to_le16(frame_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	frames[frame_index]->frame.size = data_len + sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	/* In case the remaining data_len is smaller than a frame */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	while (len < data_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		memcpy(dest, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		data_len -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		dest += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (unmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			unmap = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		if (frag < skb_shinfo(skb)->nr_frags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			src = tbnet_kmap_frag(skb, frag++, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			unmap = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		} else if (unlikely(data_len > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			goto err_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	memcpy(dest, src, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		kunmap_atomic(src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		goto err_drop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	for (i = 0; i < frame_index + 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		atomic_inc(&net->frame_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	net->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	net->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	dev_consume_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) err_drop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/* We can re-use the buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	net->tx_ring.cons -= frame_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	dev_kfree_skb_any(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	net->stats.tx_errors++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	return NETDEV_TX_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static void tbnet_get_stats64(struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			      struct rtnl_link_stats64 *stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	struct tbnet *net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	stats->tx_packets = net->stats.tx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	stats->rx_packets = net->stats.rx_packets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	stats->tx_bytes = net->stats.tx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	stats->rx_bytes = net->stats.rx_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		net->stats.rx_over_errors + net->stats.rx_crc_errors +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		net->stats.rx_missed_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	stats->tx_errors = net->stats.tx_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	stats->rx_length_errors = net->stats.rx_length_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	stats->rx_over_errors = net->stats.rx_over_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	stats->rx_crc_errors = net->stats.rx_crc_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	stats->rx_missed_errors = net->stats.rx_missed_errors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static const struct net_device_ops tbnet_netdev_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	.ndo_open = tbnet_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	.ndo_stop = tbnet_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	.ndo_start_xmit = tbnet_start_xmit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	.ndo_get_stats64 = tbnet_get_stats64,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
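/*
 * Derive a stable, locally administered unicast MAC address from the
 * XDomain link: the physical port number is placed in the high nibble of
 * the first byte (with the locally-administered bit 0x02 set) and the
 * remaining bytes come from a jhash of the local UUID.  A host connected
 * through phy port 1, for example, gets an address of the form
 * 12:xx:xx:xx:xx:yy.
 */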
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static void tbnet_generate_mac(struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	const struct tbnet *net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	const struct tb_xdomain *xd = net->xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	u8 phy_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	u32 hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	/* Unicast and locally administered MAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	dev->dev_addr[0] = phy_port << 4 | 0x02;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	dev->dev_addr[5] = hash & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
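/*
 * Probe one Thunderbolt "network" service.  An Ethernet netdev is
 * allocated on top of the XDomain connection, the work items and atomic
 * counters used by the ThunderboltIP state machine are initialized,
 * offload features are announced, the control-packet handler is
 * registered for the service UUID and the netdev is registered as
 * thunderbolt%d.
 */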
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	struct tb_xdomain *xd = tb_service_parent(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	struct tbnet *net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	dev = alloc_etherdev(sizeof(*net));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	SET_NETDEV_DEV(dev, &svc->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	net = netdev_priv(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	INIT_WORK(&net->connected_work, tbnet_connected_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	mutex_init(&net->connection_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	atomic_set(&net->command_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	atomic_set(&net->frame_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	net->svc = svc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	net->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	net->xd = xd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	tbnet_generate_mac(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	strcpy(dev->name, "thunderbolt%d");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	dev->netdev_ops = &tbnet_netdev_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/* ThunderboltIP takes advantage of TSO packets, but instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 * segmenting them we just split the packet into Thunderbolt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 * frames (the maximum payload size of each frame is 4084 bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	 * and calculate the checksum over the whole packet here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	 * The receiving side does the opposite if the host OS supports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	 * LRO, otherwise it needs to split the large packet into MTU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	 * sized smaller packets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	 * In order to receive large packets from the networking stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	 * we need to announce support for most of the offloading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	 * features here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	/* MTU range: 68 - 65522 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	dev->min_mtu = ETH_MIN_MTU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	net->handler.uuid = &tbnet_svc_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	net->handler.callback = tbnet_handle_packet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	net->handler.data = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	tb_register_protocol_handler(&net->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	tb_service_set_drvdata(svc, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	ret = register_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		tb_unregister_protocol_handler(&net->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		free_netdev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void tbnet_remove(struct tb_service *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct tbnet *net = tb_service_get_drvdata(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	unregister_netdev(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	tb_unregister_protocol_handler(&net->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	free_netdev(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static void tbnet_shutdown(struct tb_service *svc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	tbnet_tear_down(tb_service_get_drvdata(svc), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
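/*
 * System sleep hooks: on suspend the login state machine is stopped, the
 * netdev is detached and any active connection is torn down before the
 * protocol handler is unregistered.  On resume the handler is registered
 * again and, if the interface was running, the login handshake restarts
 * with the carrier initially off.
 */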
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) static int __maybe_unused tbnet_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct tb_service *svc = tb_to_service(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	struct tbnet *net = tb_service_get_drvdata(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	stop_login(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (netif_running(net->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		netif_device_detach(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		tbnet_tear_down(net, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	tb_unregister_protocol_handler(&net->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static int __maybe_unused tbnet_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct tb_service *svc = tb_to_service(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	struct tbnet *net = tb_service_get_drvdata(svc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	tb_register_protocol_handler(&net->handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	netif_carrier_off(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (netif_running(net->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		netif_device_attach(net->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		start_login(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static const struct dev_pm_ops tbnet_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	SET_SYSTEM_SLEEP_PM_OPS(tbnet_suspend, tbnet_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static const struct tb_service_id tbnet_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	{ TB_SERVICE("network", 1) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) static struct tb_service_driver tbnet_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		.owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		.name = "thunderbolt-net",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		.pm = &tbnet_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	.probe = tbnet_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	.remove = tbnet_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	.shutdown = tbnet_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	.id_table = tbnet_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
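/*
 * Module init: build the XDomain property directory that advertises the
 * "network" service to remote hosts (protocol ID, version, revision and
 * the prtcstns capability bits), register it and then register the
 * service driver.  tbnet_exit() undoes this in reverse order.
 */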
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int __init tbnet_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	if (!tbnet_dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	/* Currently only announce support for match frags ID (bit 1). Bit 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	 * is reserved for full E2E flow control which we do not support at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	 * the moment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	tb_property_add_immediate(tbnet_dir, "prtcstns",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 				  TBNET_MATCH_FRAGS_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	ret = tb_register_property_dir("network", tbnet_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		tb_property_free_dir(tbnet_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	return tb_register_service_driver(&tbnet_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) module_init(tbnet_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void __exit tbnet_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	tb_unregister_service_driver(&tbnet_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	tb_unregister_property_dir("network", tbnet_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	tb_property_free_dir(tbnet_dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) module_exit(tbnet_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) MODULE_DESCRIPTION("Thunderbolt network driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) MODULE_LICENSE("GPL v2");