Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/thunderbolt/usb4.c (USB4 router operations); every line below is attributed by git blame to commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

// SPDX-License-Identifier: GPL-2.0
/*
 * USB4 specific functionality
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>
#include <linux/ktime.h>

#include "sb_regs.h"
#include "tb.h"

#define USB4_DATA_DWORDS		16
#define USB4_DATA_RETRIES		3

enum usb4_switch_op {
	USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
	USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
	USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
	USB4_SWITCH_OP_NVM_WRITE = 0x20,
	USB4_SWITCH_OP_NVM_AUTH = 0x21,
	USB4_SWITCH_OP_NVM_READ = 0x22,
	USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
	USB4_SWITCH_OP_DROM_READ = 0x24,
	USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
};

enum usb4_sb_target {
	USB4_SB_TARGET_ROUTER,
	USB4_SB_TARGET_PARTNER,
	USB4_SB_TARGET_RETIMER,
};

#define USB4_NVM_READ_OFFSET_MASK	GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT	2
#define USB4_NVM_READ_LENGTH_MASK	GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT	24

#define USB4_NVM_SET_OFFSET_MASK	USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT	USB4_NVM_READ_OFFSET_SHIFT

#define USB4_DROM_ADDRESS_MASK		GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT		2
#define USB4_DROM_SIZE_MASK		GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT		15

#define USB4_NVM_SECTOR_SIZE_MASK	GENMASK(23, 0)

typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
typedef int (*write_block_fn)(void *, const void *, size_t);

static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
				    u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}

static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
				    size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}

static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
				     size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
}

static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
{
	return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}

static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
{
	return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
}

static int usb4_do_read_data(u16 address, void *buf, size_t size,
			     read_block_fn read_block, void *read_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	do {
		unsigned int dwaddress, dwords;
		u8 data[USB4_DATA_DWORDS * 4];
		size_t nbytes;
		int ret;

		offset = address & 3;
		nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);

		dwaddress = address / 4;
		dwords = ALIGN(nbytes, 4) / 4;

		ret = read_block(read_block_data, dwaddress, data, dwords);
		if (ret) {
			if (ret != -ENODEV && retries--)
				continue;
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, data + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
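
/*
 * For illustration, a worked example of the alignment math above,
 * assuming a hypothetical 70-byte read starting at byte address 6
 * (values chosen arbitrarily):
 *
 *	offset    = 6 & 3            = 2
 *	nbytes    = min(70 + 2, 64)  = 64
 *	dwaddress = 6 / 4            = 1
 *	dwords    = ALIGN(64, 4) / 4 = 16
 *
 * The first block reads dwords 1..16 (bytes 4..67), 62 bytes
 * (64 - offset) are copied to @buf, and the loop continues at byte
 * address 68 with 8 bytes remaining.
 */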

static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
	write_block_fn write_next_block, void *write_block_data)
{
	unsigned int retries = USB4_DATA_RETRIES;
	unsigned int offset;

	offset = address & 3;
	address = address & ~3;

	do {
		u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
		u8 data[USB4_DATA_DWORDS * 4];
		int ret;

		memcpy(data + offset, buf, nbytes);

		ret = write_next_block(write_block_data, data, nbytes / 4);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
{
	u32 val;
	int ret;

	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	*status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
	return 0;
}
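
/*
 * A USB4 router operation generally follows the same pattern. This is a
 * minimal sketch of how the helpers above are combined, not an exported
 * interface; metadata, opcode, buf and dwords are operation-specific:
 *
 *	ret = usb4_switch_op_write_metadata(sw, metadata);
 *	if (!ret)
 *		ret = usb4_switch_op(sw, opcode, &status);
 *	if (!ret && status)
 *		ret = -EIO;		// router reported an error status
 *	if (!ret)
 *		ret = usb4_switch_op_read_data(sw, buf, dwords);
 *
 * The concrete callers below (DROM read, NVM read/write, DP resource
 * operations) are variations of this sequence.
 */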

static void usb4_switch_check_wakes(struct tb_switch *sw)
{
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	if (!device_may_wakeup(&sw->dev))
		return;

	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/* Check for any connected downstream ports for USB4 wake */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no");

		if (val & PORT_CS_18_WOU4S)
			wakeup = true;
	}

	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}

static bool link_is_usb4(struct tb_port *port)
{
	u32 val;

	if (!port->cap_usb4)
		return false;

	if (tb_port_read(port, &val, TB_CFG_PORT,
			 port->cap_usb4 + PORT_CS_18, 1))
		return false;

	return !(val & PORT_CS_18_TCM);
}

/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g. the parent switch also supports them). If USB tunneling
 * is not available for some reason (e.g. there is a Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_port *downstream_port;
	struct tb_switch *parent;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	usb4_switch_check_wakes(sw);

	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	parent = tb_switch_parent(sw);
	downstream_port = tb_port_at(tb_route(sw), parent);
	sw->link_usb4 = link_is_usb4(downstream_port);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");

	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/* Only enable PCIe tunneling if the parent router supports it */
	if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val |= ROUTER_CS_5_C3S;
	/* Tunneling configuration is ready now */
	val |= ROUTER_CS_5_CV;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
					ROUTER_CS_6_CR, 50);
}
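
/*
 * For illustration, consider a hypothetical device router whose parent
 * has both USB 3.x and PCIe downstream adapters and whose upstream link
 * came up as USB4: the code above sets ROUTER_CS_5_UTO and
 * ROUTER_CS_5_PTO, leaves ROUTER_CS_5_HCO clear (USB 3.x tunneling is
 * preferred over the internal xHCI), and always adds ROUTER_CS_5_C3S
 * and ROUTER_CS_5_CV before polling for ROUTER_CS_6_CR.
 */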

/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}

static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		USB4_DROM_ADDRESS_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}

/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_drom_read_block, sw);
}

/**
 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
 * @sw: USB4 router
 *
 * Checks whether conditions are met so that lane bonding can be
 * established with the upstream router. Call only for device routers.
 */
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int ret;
	u32 val;

	up = tb_upstream_port(sw);
	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
	if (ret)
		return false;

	return !!(val & PORT_CS_18_BE);
}

/**
 * usb4_switch_set_wake() - Enable/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (flags & TB_WAKE_ON_CONNECT)
			val |= PORT_CS_19_WOC;
		if (flags & TB_WAKE_ON_DISCONNECT)
			val |= PORT_CS_19_WOD;
		if (flags & TB_WAKE_ON_USB4)
			val |= PORT_CS_19_WOU4;

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe and USB 3.x on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * usb4_switch_set_sleep() - Prepare the router to enter sleep
 * @sw: USB4 router
 *
 * Sets sleep bit for the router. Returns when the router sleep ready
 * bit has been asserted.
 */
int usb4_switch_set_sleep(struct tb_switch *sw)
{
	int ret;
	u32 val;

	/* Set sleep bit and wait for sleep ready to be asserted */
	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	val |= ROUTER_CS_5_SLP;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
					ROUTER_CS_6_SLPR, 500);
}
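
/*
 * A minimal sketch of how a connection manager could combine the two
 * helpers above when suspending a router (the flag choice here is
 * illustrative, not mandated):
 *
 *	ret = usb4_switch_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
 *	if (!ret)
 *		ret = usb4_switch_set_sleep(sw);
 *
 * The wake bits in PORT_CS_19 (and ROUTER_CS_5 for device routers) are
 * programmed first; only then is ROUTER_CS_5_SLP set and
 * ROUTER_CS_6_SLPR polled.
 */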

/**
 * usb4_switch_nvm_sector_size() - Return router NVM sector size
 * @sw: USB4 router
 *
 * If the router supports NVM operations this function returns the NVM
 * sector size in bytes. If NVM operations are not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
	u32 metadata;
	u8 status;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
	if (ret)
		return ret;

	if (status)
		return status == 0x2 ? -EOPNOTSUPP : -EIO;

	ret = usb4_switch_op_read_metadata(sw, &metadata);
	if (ret)
		return ret;

	return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}

static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status = 0;
	u32 metadata;
	int ret;

	metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		   USB4_NVM_READ_LENGTH_MASK;
	metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		   USB4_NVM_READ_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
	if (ret)
		return ret;

	if (status)
		return -EIO;

	return usb4_switch_op_read_data(sw, buf, dwords);
}

/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	return usb4_do_read_data(address, buf, size,
				 usb4_switch_nvm_read_block, sw);
}

static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
				      unsigned int address)
{
	u32 metadata, dwaddress;
	u8 status = 0;
	int ret;

	dwaddress = address / 4;
	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
		   USB4_NVM_SET_OFFSET_MASK;

	ret = usb4_switch_op_write_metadata(sw, metadata);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
					    size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_write_data(sw, buf, dwords);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}

/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return usb4_do_write_data(address, buf, size,
				  usb4_switch_nvm_write_next_block, sw);
}

/**
 * usb4_switch_nvm_authenticate() - Authenticate new NVM
 * @sw: USB4 router
 *
 * After the new NVM has been written via usb4_switch_nvm_write(), this
 * function triggers NVM authentication process. If the authentication
 * is successful the router is power cycled and the new NVM starts
 * running. In case of failure returns negative errno.
 */
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
	u8 status = 0;
	int ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
	if (ret)
		return ret;

	switch (status) {
	case 0x0:
		tb_sw_dbg(sw, "NVM authentication successful\n");
		return 0;
	case 0x1:
		return -EINVAL;
	case 0x2:
		return -EAGAIN;
	case 0x3:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
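
/*
 * A minimal sketch of a full NVM upgrade using the helpers above
 * (error handling trimmed; image and image_size are placeholders for a
 * caller-provided firmware image):
 *
 *	int sector = usb4_switch_nvm_sector_size(sw);	// < 0 if unsupported
 *
 *	if (sector > 0) {
 *		ret = usb4_switch_nvm_write(sw, 0, image, image_size);
 *		if (!ret)
 *			ret = usb4_switch_nvm_authenticate(sw);
 *	}
 *
 * On successful authentication the router power cycles itself and boots
 * the new image.
 */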

/**
 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * For DP tunneling this function can be used to query availability of
 * DP IN resource. Returns true if the resource is available for DP
 * tunneling, false otherwise.
 */
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return false;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
	/*
	 * If DP resource allocation is not supported assume it is
	 * always available.
	 */
	if (ret == -EOPNOTSUPP)
		return true;
	else if (ret)
		return false;

	return !status;
}

/**
 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Allocates DP IN resource for DP tunneling using USB4 router
 * operations. If the resource was allocated returns %0. Otherwise
 * returns negative errno, in particular %-EBUSY if the resource is
 * already allocated.
 */
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EBUSY : 0;
}

/**
 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
 * @sw: USB4 router
 * @in: DP IN adapter
 *
 * Releases the previously allocated DP IN resource.
 */
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	u8 status;
	int ret;

	ret = usb4_switch_op_write_metadata(sw, in->port);
	if (ret)
		return ret;

	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
	if (ret == -EOPNOTSUPP)
		return 0;
	else if (ret)
		return ret;

	return status ? -EIO : 0;
}

static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}
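
/*
 * For illustration: usb4_port_idx() counts only primary (link_nr == 0)
 * non-upstream lane adapters, so on a hypothetical router whose
 * downstream lane adapters come in pairs (say ports 1/2 and 3/4), port 1
 * maps to index 0 and port 3 to index 1. The mapping helpers below then
 * return the 0th/1st PCIe or USB 3.x downstream adapter accordingly.
 */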
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776)  * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * @sw: USB4 router
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * USB4 routers have direct mapping between USB4 ports and PCIe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  * downstream adapters where the PCIe topology is extended. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * function returns the corresponding downstream PCIe adapter or %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * if no such mapping was possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 					  const struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	int usb4_idx = usb4_port_idx(sw, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	struct tb_port *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	int pcie_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	/* Find PCIe down port matching usb4_port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	tb_switch_for_each_port(sw, p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (!tb_port_is_pcie_down(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		if (pcie_idx == usb4_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		pcie_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808)  * @sw: USB4 router
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * USB4 routers have direct mapping between USB4 ports and USB 3.x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * downstream adapters where the USB 3.x topology is extended. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * function returns the corresponding downstream USB 3.x adapter or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * %NULL if no such mapping was possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 					  const struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	int usb4_idx = usb4_port_idx(sw, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct tb_port *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	int usb_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	/* Find USB3 down port matching usb4_port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	tb_switch_for_each_port(sw, p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		if (!tb_port_is_usb3_down(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		if (usb_idx == usb4_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			return p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		usb_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) }
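
/*
 * Editor's sketch (illustrative only, not part of the original driver): how
 * a connection manager might use the two mapping helpers above when it is
 * about to tunnel PCIe or USB3 from a USB4 port. The function name is
 * hypothetical.
 */
static void __maybe_unused example_show_tunnel_adapters(struct tb_switch *sw,
							const struct tb_port *port)
{
	struct tb_port *pcie, *usb3;

	pcie = usb4_switch_map_pcie_down(sw, port);
	if (pcie)
		tb_port_dbg(pcie, "PCIe downstream adapter for USB4 port %u\n",
			    port->port);

	usb3 = usb4_switch_map_usb3_down(sw, port);
	if (usb3)
		tb_port_dbg(usb3, "USB3 downstream adapter for USB4 port %u\n",
			    port->port);
}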
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * usb4_port_unlock() - Unlock USB4 downstream port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * @port: USB4 port to unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * Unlocks USB4 downstream port so that the connection manager can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  * access the router below this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) int usb4_port_unlock(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	val &= ~ADP_CS_4_LCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) static int usb4_port_set_configured(struct tb_port *port, bool configured)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (!port->cap_usb4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			   port->cap_usb4 + PORT_CS_19, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (configured)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		val |= PORT_CS_19_PC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		val &= ~PORT_CS_19_PC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 			     port->cap_usb4 + PORT_CS_19, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880)  * usb4_port_configure() - Set USB4 port configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * Sets the USB4 link to be configured for power management purposes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) int usb4_port_configure(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	return usb4_port_set_configured(port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891)  * usb4_port_unconfigure() - Set USB4 port unconfigured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894)  * Sets the USB4 link to be unconfigured for power management purposes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) void usb4_port_unconfigure(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	usb4_port_set_configured(port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
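
/*
 * Editor's sketch (illustrative only, not part of the original driver): the
 * usual order in which a connection manager would use the helpers above when
 * a router appears behind a downstream port, and when it is unplugged again.
 * Both function names are made up for the example.
 */
static int __maybe_unused example_downstream_port_connected(struct tb_port *port)
{
	int ret;

	/* Allow access to the router behind this downstream port */
	ret = usb4_port_unlock(port);
	if (ret)
		return ret;

	/* Mark the link configured so it participates in power management */
	return usb4_port_configure(port);
}

static void __maybe_unused example_downstream_port_disconnected(struct tb_port *port)
{
	usb4_port_unconfigure(port);
}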
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (!port->cap_usb4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			   port->cap_usb4 + PORT_CS_19, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (configured)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		val |= PORT_CS_19_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		val &= ~PORT_CS_19_PID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	return tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			     port->cap_usb4 + PORT_CS_19, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * usb4_port_configure_xdomain() - Configure port for XDomain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  * @port: USB4 port connected to another host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * Marks the USB4 port as being connected to another host. Returns %0 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * case of success and negative errno in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) int usb4_port_configure_xdomain(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return usb4_set_xdomain_configured(port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * @port: USB4 port that was connected to another host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * Clears USB4 port from being marked as XDomain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) void usb4_port_unconfigure_xdomain(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	usb4_set_xdomain_configured(port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) }
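
/*
 * Editor's sketch (illustrative only, not part of the original driver): how
 * the XDomain helpers above would typically bracket a host-to-host
 * connection. The function names are hypothetical; the real callers live in
 * the connection manager.
 */
static int __maybe_unused example_xdomain_connected(struct tb_port *port)
{
	/* Mark the link as inter-domain before setting up paths to the peer */
	return usb4_port_configure_xdomain(port);
}

static void __maybe_unused example_xdomain_disconnected(struct tb_port *port)
{
	/* Clear the inter-domain marking once the peer host is gone */
	usb4_port_unconfigure_xdomain(port);
}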
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 				  u32 value, int timeout_msec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		if ((val & bit) == value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		usleep_range(50, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	} while (ktime_before(ktime_get(), timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (dwords > USB4_DATA_DWORDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			    dwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static int usb4_port_write_data(struct tb_port *port, const void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 				size_t dwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (dwords > USB4_DATA_DWORDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			     dwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			     u8 index, u8 reg, void *buf, u8 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	size_t dwords = DIV_ROUND_UP(size, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (!port->cap_usb4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	val = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	val |= size << PORT_CS_1_LENGTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (target == USB4_SB_TARGET_RETIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	val |= PORT_CS_1_PND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	ret = tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			    port->cap_usb4 + PORT_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				     PORT_CS_1_PND, 0, 500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			    port->cap_usb4 + PORT_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	if (val & PORT_CS_1_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	if (val & PORT_CS_1_RC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			      u8 index, u8 reg, const void *buf, u8 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	size_t dwords = DIV_ROUND_UP(size, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	if (!port->cap_usb4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		ret = usb4_port_write_data(port, buf, dwords);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	val = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	val |= size << PORT_CS_1_LENGTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	val |= PORT_CS_1_WNR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	if (target == USB4_SB_TARGET_RETIMER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	val |= PORT_CS_1_PND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	ret = tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			    port->cap_usb4 + PORT_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				     PORT_CS_1_PND, 0, 500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			    port->cap_usb4 + PORT_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if (val & PORT_CS_1_NR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	if (val & PORT_CS_1_RC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	ktime_t timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	val = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				 sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	timeout = ktime_add_ms(ktime_get(), timeout_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		/* Check results */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 					&val, sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		case USB4_SB_OPCODE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		case USB4_SB_OPCODE_ONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			if (val != opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	} while (ktime_before(ktime_get(), timeout));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * usb4_port_enumerate_retimers() - Send RT broadcast transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * This forces the USB4 port to send a broadcast RT transaction which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  * makes the retimers on the link assign an index to themselves. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)  * %0 in case of success and negative errno if there was an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) int usb4_port_enumerate_retimers(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				  USB4_SB_OPCODE, &val, sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				       enum usb4_sb_opcode opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				       int timeout_msec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 			       timeout_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * usb4_port_retimer_read() - Read from retimer sideband registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * @reg: Sideband register to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * @buf: Data from @reg is stored here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * @size: Number of bytes to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * Function reads retimer sideband registers starting from @reg. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * retimer is connected to @port at @index. Returns %0 in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  * success, and the read data is copied to @buf. If there is no retimer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * present at the given @index returns %-ENODEV. In case of any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * failure returns negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			   u8 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
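
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * probing which retimer indices are populated after a broadcast enumeration,
 * by reading a sideband register of each index until -ENODEV. The function
 * name, the index range and the use of USB4_SB_VENDOR_ID as the probe
 * register are assumptions made for the example.
 */
static int __maybe_unused example_count_retimers(struct tb_port *port)
{
	int count = 0;
	u8 index;

	if (usb4_port_enumerate_retimers(port))
		return 0;

	for (index = 1; index <= 6; index++) {
		u32 vendor;

		if (usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID,
					   &vendor, sizeof(vendor)))
			break;
		count++;
	}

	return count;
}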
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)  * usb4_port_retimer_write() - Write to retimer sideband registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)  * @reg: Sideband register to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)  * @buf: Data that is written starting from @reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)  * @size: Number of bytes to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)  * Writes retimer sideband registers starting from @reg. The retimer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)  * connected to @port at @index. Returns %0 in case of success. If there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)  * is no retimer present at the given @index returns %-ENODEV. In case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)  * of any other failure returns negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			    const void *buf, u8 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 				  size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  * usb4_port_retimer_is_last() - Is the retimer the last on-board retimer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * If the retimer at @index is the last one (connected directly to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * Type-C port) this function returns %1. If it is not, returns %0. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * the retimer is not present, returns %-ENODEV. Otherwise returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  * negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	u32 metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 				   500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 				     sizeof(metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	return ret ? ret : metadata & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)  * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * Reads NVM sector size (in bytes) of a retimer at @index. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * operation can be used, for example, to determine whether the retimer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * supports NVM upgrade. Returns sector size in bytes or negative errno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  * in case of error. Specifically returns %-ENODEV if there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * retimer at @index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	u32 metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				   500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				     sizeof(metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
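
/*
 * Editor's sketch (illustrative only, not part of the original driver): using
 * the sector size query as a capability check before attempting an NVM
 * upgrade, as the kernel-doc above suggests. The function name is
 * hypothetical.
 */
static bool __maybe_unused example_retimer_supports_nvm_upgrade(struct tb_port *port,
								u8 index)
{
	return usb4_port_retimer_nvm_sector_size(port, index) > 0;
}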
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 					    unsigned int address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	u32 metadata, dwaddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	dwaddress = address / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		  USB4_NVM_SET_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				      sizeof(metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 				    500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) struct retimer_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	u8 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 						  size_t dwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	const struct retimer_info *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct tb_port *port = info->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	u8 index = info->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				      buf, dwords * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	return usb4_port_retimer_op(port, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)  * usb4_port_retimer_nvm_write() - Write to retimer NVM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)  * @address: Byte address where to start the write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)  * @buf: Data to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)  * @size: Number of bytes to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)  * Writes @size bytes from @buf to the retimer NVM. Used for NVM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * upgrade. Returns %0 if the data was written successfully and negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * errno in case of failure. Specifically returns %-ENODEV if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * no retimer at @index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				const void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	struct retimer_info info = { .port = port, .index = index };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	ret = usb4_port_retimer_nvm_set_offset(port, index, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	return usb4_do_write_data(address, buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			usb4_port_retimer_nvm_write_next_block, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * After the new NVM image has been written via usb4_port_retimer_nvm_write()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  * this function can be used to trigger the NVM upgrade process. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  * successful, the retimer restarts with the new NVM and may not have an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * index set anymore, so one needs to call usb4_port_enumerate_retimers()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * to force a new index to be assigned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 * We need to use the raw operation here because once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	 * authentication completes the retimer index is not set anymore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	 * so we do not get back the status now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				  USB4_SB_OPCODE, &val, sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  * @status: Raw status code read from metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * This can be called after usb4_port_retimer_nvm_authenticate() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * Returns %0 if the authentication status was successfully read. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * completion metadata (the result) is then stored into @status. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * reading the status fails, returns negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 					      u32 *status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	u32 metadata, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 				     sizeof(val));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		*status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	case USB4_SB_OPCODE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 					     &metadata, sizeof(metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	case USB4_SB_OPCODE_ONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
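
/*
 * Editor's sketch (illustrative only, not part of the original driver): the
 * overall retimer NVM upgrade sequence built from the helpers above. The
 * function name and the fixed 100 ms settle delay are assumptions made for
 * the example; the real flow lives in the retimer driver.
 */
static int __maybe_unused example_retimer_nvm_upgrade(struct tb_port *port,
						      u8 index,
						      const void *image,
						      size_t size)
{
	u32 status;
	int ret;

	/* Stage the new image into the retimer NVM */
	ret = usb4_port_retimer_nvm_write(port, index, 0, image, size);
	if (ret)
		return ret;

	/* Trigger authentication; the retimer drops its index on success */
	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	msleep(100);

	/* Re-assign indices and then fetch the result of the upgrade */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate_status(port, index, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}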
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 					    void *buf, size_t dwords)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	const struct retimer_info *info = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct tb_port *port = info->port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	u8 index = info->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	u32 metadata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (dwords < USB4_DATA_DWORDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 				      sizeof(metadata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				      dwords * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * @port: USB4 port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * @index: Retimer index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  * @address: NVM address (in bytes) to start reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * @buf: Data read from NVM is stored here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  * @size: Number of bytes to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  * read was successful and negative errno in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  * Specifically returns %-ENODEV if there is no retimer at @index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 			       unsigned int address, void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	struct retimer_info info = { .port = port, .index = index };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	return usb4_do_read_data(address, buf, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 			usb4_port_retimer_nvm_read_block, &info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
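
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * dumping the first bytes of a retimer NVM for debugging. The function name
 * and the 64-byte window are arbitrary choices for the example.
 */
static int __maybe_unused example_dump_retimer_nvm(struct tb_port *port, u8 index)
{
	u8 buf[64];
	int ret;

	ret = usb4_port_retimer_nvm_read(port, index, 0, buf, sizeof(buf));
	if (ret)
		return ret;

	print_hex_dump_bytes("retimer NVM: ", DUMP_PREFIX_OFFSET, buf,
			     sizeof(buf));
	return 0;
}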
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  * @port: USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * or negative errno in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int usb4_usb3_port_max_link_rate(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	int ret, lr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			   port->cap_adap + ADP_USB3_CS_4, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  * @port: USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  * Returns the actual established link rate of a USB3 adapter in Mb/s. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * the link is not up, returns %0. In case of failure returns negative errno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int usb4_usb3_port_actual_link_rate(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	int ret, lr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			   port->cap_adap + ADP_USB3_CS_4, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (!(val & ADP_USB3_CS_4_ULV))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	lr = val & ADP_USB3_CS_4_ALR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
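
/*
 * Editor's sketch (illustrative only, not part of the original driver):
 * comparing the maximum and the established USB3 link rate of an adapter,
 * e.g. to note a degraded link. The function name is hypothetical.
 */
static void __maybe_unused example_check_usb3_link_rate(struct tb_port *port)
{
	int max_rate = usb4_usb3_port_max_link_rate(port);
	int actual_rate = usb4_usb3_port_actual_link_rate(port);

	if (max_rate < 0 || actual_rate < 0)
		return;
	if (!actual_rate)
		tb_port_dbg(port, "USB3 link is not up\n");
	else if (actual_rate < max_rate)
		tb_port_dbg(port, "USB3 link running at %d Mb/s (max %d Mb/s)\n",
			    actual_rate, max_rate);
}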
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	if (!tb_port_is_usb3_down(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (tb_route(port->sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			   port->cap_adap + ADP_USB3_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		val |= ADP_USB3_CS_2_CMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 		val &= ~ADP_USB3_CS_2_CMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	ret = tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			    port->cap_adap + ADP_USB3_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	 * We can use val here directly as the CMR bit is in the same place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	 * as HCA. Just mask out others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	val &= ADP_USB3_CS_2_CMR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 				      ADP_USB3_CS_1_HCA, val, 1500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	return usb4_usb3_port_cm_request(port, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	return usb4_usb3_port_cm_request(port, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	unsigned long uframes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	uframes = bw * 512UL << scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	unsigned long uframes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	uframes = ((unsigned long)mbps * 1000 *  1000) / 8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	return DIV_ROUND_UP(uframes, 512UL << scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
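
/*
 * Editor's illustration (not part of the original driver): a worked example
 * of the two conversions above, assuming scale == 0. The code then maps one
 * register unit to DIV_ROUND_CLOSEST(512 * 8000, 1000000) = 4 Mb/s, so
 * requesting 900 Mb/s rounds up to 220 units, which reads back as 901 Mb/s.
 */
static void __maybe_unused usb3_bw_conversion_example(void)
{
	u32 units = mbps_to_usb3_bw(900, 0);		/* = 220 */
	unsigned int mbps = usb3_bw_to_mbps(units, 0);	/* = 901 */

	WARN_ON(units != 220 || mbps != 901);
}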
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 						   int *upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 						   int *downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	u32 val, bw, scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			   port->cap_adap + ADP_USB3_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			   port->cap_adap + ADP_USB3_CS_3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	scale &= ADP_USB3_CS_3_SCALE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	bw = val & ADP_USB3_CS_2_AUBW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	*upstream_bw = usb3_bw_to_mbps(bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	*downstream_bw = usb3_bw_to_mbps(bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  * @port: USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * @upstream_bw: Allocated upstream bandwidth is stored here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  * @downstream_bw: Allocated downstream bandwidth is stored here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  * Stores currently allocated USB3 bandwidth into @upstream_bw and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  * @downstream_bw in Mb/s. Returns %0 in case of success and negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  * errno in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				       int *downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	ret = usb4_usb3_port_set_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 						      downstream_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	usb4_usb3_port_clear_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
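
/*
 * Editor's sketch (illustrative only, not part of the original driver): how
 * a connection manager might log the USB3 bandwidth currently allocated on a
 * host router's USB3 downstream adapter. The function name is made up; the
 * helper above already brackets the read with the CM request handshake.
 */
static void __maybe_unused example_log_usb3_allocation(struct tb_port *port)
{
	int up, down;

	if (usb4_usb3_port_allocated_bandwidth(port, &up, &down))
		return;

	tb_port_dbg(port, "allocated USB3 bandwidth %d/%d Mb/s (up/down)\n",
		    up, down);
}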
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 						  int *upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 						  int *downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	u32 val, bw, scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 			   port->cap_adap + ADP_USB3_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 			   port->cap_adap + ADP_USB3_CS_3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	scale &= ADP_USB3_CS_3_SCALE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	bw = val & ADP_USB3_CS_1_CUBW_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	*upstream_bw = usb3_bw_to_mbps(bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	*downstream_bw = usb3_bw_to_mbps(bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 						    int upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 						    int downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	u32 val, ubw, dbw, scale;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	/* Read the used scale, hardware default is 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	ret = tb_port_read(port, &scale, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			   port->cap_adap + ADP_USB3_CS_3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	scale &= ADP_USB3_CS_3_SCALE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	ubw = mbps_to_usb3_bw(upstream_bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	dbw = mbps_to_usb3_bw(downstream_bw, scale);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			   port->cap_adap + ADP_USB3_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	val |= ubw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	return tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 			     port->cap_adap + ADP_USB3_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)  * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)  * @port: USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)  * @upstream_bw: New upstream bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)  * @downstream_bw: New downstream bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)  * This can be used to set how much bandwidth is allocated for the USB3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)  * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)  * new values programmed to the USB3 adapter allocation registers. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)  * the values are lower than what is currently consumed the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)  * is set to what is currently consumed instead (consumed bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)  * cannot be taken away by CM). The actual new values are returned in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)  * @upstream_bw and @downstream_bw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)  * Returns %0 in case of success and negative errno if there was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 				      int *downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	int ret, consumed_up, consumed_down, allocate_up, allocate_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	ret = usb4_usb3_port_set_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 						     &consumed_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		goto err_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	/* Don't allow it to go lower than what is consumed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	allocate_up = max(*upstream_bw, consumed_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	allocate_down = max(*downstream_bw, consumed_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 						       allocate_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		goto err_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	*upstream_bw = allocate_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	*downstream_bw = allocate_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) err_request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	usb4_usb3_port_clear_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
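/*
 * Illustrative sketch (editor-added, not part of the original file):
 * requesting a new allocation.  Because the helper clamps the request
 * to what is already consumed, the caller should read back the values
 * it passed in to learn what was actually programmed.  The name
 * tb_example_request_usb3_bw() and the 2000 Mb/s figures are
 * assumptions made purely for illustration.
 */
static int tb_example_request_usb3_bw(struct tb_port *port)
{
	int up = 2000, down = 2000;	/* requested Mb/s */
	int ret;

	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	/* up/down now hold the values actually written to the adapter */
	tb_port_dbg(port, "allocated %d/%d Mb/s (up/down)\n", up, down);
	return 0;
}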
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)  * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)  * @port: USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * @upstream_bw: New allocated upstream bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  * @downstream_bw: New allocated downstream bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  * Releases USB3 allocated bandwidth down to what is actually consumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  * The new bandwidth is returned in @upstream_bw and @downstream_bw.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  * Returns %0 in case of success and negative errno in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				     int *downstream_bw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	int ret, consumed_up, consumed_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	ret = usb4_usb3_port_set_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 						     &consumed_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		goto err_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 * Always keep 1000 Mb/s to make sure xHCI has at least some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	 * bandwidth available for isochronous traffic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	if (consumed_up < 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		consumed_up = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	if (consumed_down < 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		consumed_down = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 						       consumed_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		goto err_request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	*upstream_bw = consumed_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	*downstream_bw = consumed_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) err_request:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	usb4_usb3_port_clear_cm_request(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
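/*
 * Illustrative sketch (editor-added, not part of the original file):
 * releasing bandwidth that is no longer needed, for example when a
 * USB3 tunnel is torn down or idle.  The helper never drops the
 * allocation below what is consumed (with a 1000 Mb/s floor), so the
 * returned values show how much remains reserved.  The name
 * tb_example_release_usb3_bw() is an assumption for this example.
 */
static int tb_example_release_usb3_bw(struct tb_port *port)
{
	int up, down;
	int ret;

	ret = usb4_usb3_port_release_bandwidth(port, &up, &down);
	if (ret)
		return ret;

	tb_port_dbg(port, "released down to %d/%d Mb/s (up/down)\n",
		    up, down);
	return 0;
}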