// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - switch/port utility functions
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include "tb.h"

/* Switch NVM support */

#define NVM_CSS 0x10

struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

enum nvm_write_ops {
	WRITE_AND_AUTHENTICATE = 1,
	WRITE_ONLY = 2,
};

/*
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled so we
 * keep it separately.
 */
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);

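/* Must be called with nvm_auth_status_lock held */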
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
			return st;
	}

	return NULL;
}

static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);

	*status = st ? st->status : 0;
}

static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
	struct nvm_auth_status *st;

	if (WARN_ON(!sw->uuid))
		return;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);

	if (!st) {
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			goto unlock;

		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	}

	st->status = status;
unlock:
	mutex_unlock(&nvm_auth_status_lock);
}

static void nvm_clear_auth_status(const struct tb_switch *sw)
{
	struct nvm_auth_status *st;

	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	if (st) {
		list_del(&st->list);
		kfree(st);
	}
	mutex_unlock(&nvm_auth_status_lock);
}

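/*
 * Validate the NVM image about to be written and flush it to the
 * non-active NVM of the switch: check that the image size is sane,
 * that the FARB pointer and the digital section fit inside the image
 * and, unless the switch is in safe mode, that the device ID in the
 * image matches the switch. On hardware older than generation 3 the
 * CSS headers are written separately through the DMA port first.
 */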
static int nvm_validate_and_write(struct tb_switch *sw)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	u16 ds_size;
	int ret;

	if (!buf)
		return -EINVAL;

	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	if (!sw->safe_mode) {
		u16 device_id;

		/*
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		 */
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
			return -EINVAL;

		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
			if (ret)
				return ret;
		}

		/* Skip headers in the image */
		buf += hdr_size;
		image_size -= hdr_size;
	}

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
	else
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	if (!ret)
		sw->nvm->flushed = true;
	return ret;
}

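/*
 * Start NVM authentication of the host router. Existing paths are
 * disconnected first because the host controller typically goes away
 * once the authentication succeeds. Any failure status is cached
 * before the router is power cycled.
 */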
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
	int ret = 0;

	/*
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	 * already).
	 */
	if (!sw->safe_mode) {
		u32 status;

		ret = tb_domain_disconnect_all_paths(sw->tb);
		if (ret)
			return ret;
		/*
		 * The host controller goes away pretty soon after this
		 * if everything goes well so getting a timeout is
		 * expected.
		 */
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
			return 0;

		/*
		 * Any error from the update auth operation requires
		 * power cycling of the host router.
		 */
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	}

	/*
	 * From safe mode we can get out by just power cycling the
	 * switch.
	 */
	dma_port_power_cycle(sw->dma_port);
	return ret;
}

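/*
 * Start NVM authentication of a device router. The device does not
 * respond for a while after the operation is started so we poll the
 * status and, once we get one, power cycle the device to take the new
 * (or, on failure, the old) NVM into use.
 */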
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
	int ret, retries = 10;

	ret = dma_port_flash_update_auth(sw->dma_port);
	switch (ret) {
	case 0:
	case -ETIMEDOUT:
	case -EACCES:
	case -EINVAL:
		/* Power cycle is required */
		break;
	default:
		return ret;
	}

	/*
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get timeouts for a while).
	 * Once we get a response the device needs to be power cycled
	 * for the new NVM to be taken into use.
	 */
	do {
		u32 status;

		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			return ret;
		if (ret > 0) {
			if (status) {
				tb_sw_warn(sw, "failed to authenticate NVM\n");
				nvm_set_auth_status(sw, status);
			}

			tb_sw_info(sw, "power cycling the switch now\n");
			dma_port_power_cycle(sw->dma_port);
			return 0;
		}

		msleep(500);
	} while (--retries);

	return -ETIMEDOUT;
}

static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	/*
	 * During host router NVM upgrade we should not allow the root
	 * port to go into D3cold because some root ports cannot
	 * trigger PME themselves. To be on the safe side keep the root
	 * port in D0 during the whole upgrade process.
	 */
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_get_noresume(&root_port->dev);
}

static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
	struct pci_dev *root_port;

	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
	if (root_port)
		pm_runtime_put(&root_port->dev);
}

static inline bool nvm_readable(struct tb_switch *sw)
{
	if (tb_switch_is_usb4(sw)) {
		/*
		 * USB4 devices must support NVM operations but it is
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported we assume NVM
		 * operations are implemented.
		 */
		return usb4_switch_nvm_sector_size(sw) > 0;
	}

	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
}

static inline bool nvm_upgradeable(struct tb_switch *sw)
{
	if (sw->no_nvm_upgrade)
		return false;
	return nvm_readable(sw);
}

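/* Route NVM reads through USB4 router operations or the TB 2/3 DMA port */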
static inline int nvm_read(struct tb_switch *sw, unsigned int address,
			   void *buf, size_t size)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
}

static int nvm_authenticate(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_authenticate(sw);

	if (!tb_route(sw)) {
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
	} else {
		ret = nvm_authenticate_device_dma_port(sw);
	}

	return ret;
}

static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
			      size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}

static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	int ret;

	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();

	/*
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	 */
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);

	return ret;
}

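/*
 * Expose the switch NVM through NVMem devices. Unless the switch is in
 * safe mode the active NVM is registered read-only (its size and
 * version are read from the flash first). If upgrade is possible a
 * writable non-active NVM is registered as well.
 */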
static int tb_switch_nvm_add(struct tb_switch *sw)
{
	struct tb_nvm *nvm;
	u32 val;
	int ret;

	if (!nvm_readable(sw))
		return 0;

	/*
	 * The NVM format of non-Intel hardware is not known so
	 * currently we restrict NVM upgrade to Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	 */
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
		dev_info(&sw->dev,
			 "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			 sw->config.vendor_id);
		return 0;
	}

	nvm = tb_nvm_alloc(&sw->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	/*
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	 */
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;

		ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		if (ret)
			goto err_nvm;

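		/*
		 * val & 7 encodes the flash size as a power-of-two
		 * number of Mbit (e.g. a value of 3 means 8 Mbit, i.e.
		 * 1 MiB). What remains after the header is split
		 * evenly between the active and non-active images.
		 */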
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;

		ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		if (ret)
			goto err_nvm;

		nvm->major = val >> 16;
		nvm->minor = val >> 8;

		ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
		if (ret)
			goto err_nvm;
	}

	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
					    tb_switch_nvm_write);
		if (ret)
			goto err_nvm;
	}

	sw->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

static void tb_switch_nvm_remove(struct tb_switch *sw)
{
	struct tb_nvm *nvm;

	nvm = sw->nvm;
	sw->nvm = NULL;

	if (!nvm)
		return;

	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);

	tb_nvm_free(nvm);
}

/* port utility functions */

static const char *tb_port_type(struct tb_regs_port_header *port)
{
	switch (port->type >> 16) {
	case 0:
		switch ((u8)port->type) {
		case 0:
			return "Inactive";
		case 1:
			return "Port";
		case 2:
			return "NHI";
		default:
			return "unknown";
		}
	case 0x2:
		return "Ethernet";
	case 0x8:
		return "SATA";
	case 0xe:
		return "DP/HDMI";
	case 0x10:
		return "PCIe";
	case 0x20:
		return "USB";
	default:
		return "unknown";
	}
}

static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
	tb_dbg(tb,
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       port->port_number, port->vendor_id, port->device_id,
	       port->revision, port->thunderbolt_version, tb_port_type(port),
	       port->type);
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       port->max_in_hop_id, port->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", port->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}

/**
 * tb_port_state() - get connectedness state of a port
 *
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 *
 * Return: Returns an enum tb_port_state on success or an error code on failure.
 */
static int tb_port_state(struct tb_port *port)
{
	struct tb_cap_phy phy;
	int res;

	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
		return -EINVAL;
	}
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
	if (res)
		return res;
	return phy.state;
}

/**
 * tb_wait_for_port() - wait for a port to become ready
 *
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 *
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
 */
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
	int retries = 10;
	int state;

	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
		return -EINVAL;
	}
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		return -EINVAL;
	}

	while (retries--) {
		state = tb_port_state(port);
		if (state < 0)
			return state;
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
			return 0;
		}
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
				tb_port_dbg(port,
					    "is unplugged (state: 7), retrying...\n");
				msleep(100);
				continue;
			}
			tb_port_dbg(port, "is unplugged (state: 7)\n");
			return 0;
		}
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
			return 1;
		}

		/*
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
		 * time.
		 */
		tb_port_dbg(port,
			    "is connected, link is not up (state: %d), retrying...\n",
			    state);
		msleep(100);
	}
	tb_port_warn(port,
		     "failed to reach state TB_PORT_UP. Ignoring port...\n");
	return 0;
}

/**
 * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
 *
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
	u32 nfc_credits;

	if (credits == 0 || port->sw->is_unplugged)
		return 0;

	/*
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	 */
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
		return 0;

	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	nfc_credits += credits;

	tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);

	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;

	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
}

/**
 * tb_port_set_initial_credits() - Set initial port link credits allocated
 * @port: Port to set the initial credits
 * @credits: Number of credits to allocate
 *
 * Set initial credits value to be used for ingress shared buffering.
 */
int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
	if (ret)
		return ret;

	data &= ~ADP_CS_5_LCA_MASK;
	data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;

	return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
}

/**
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_port_clear_counter(struct tb_port *port, int counter)
{
	u32 zero[3] = { 0, 0, 0 };

	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}

/**
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 *
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for the CM.
 */
int tb_port_unlock(struct tb_port *port)
{
	if (tb_switch_is_icm(port->sw))
		return 0;
	if (!tb_port_is_null(port))
		return -EINVAL;
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
	return 0;
}

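/* Set or clear the lane disable bit (LANE_ADP_CS_1_LD) of a lane adapter */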
static int __tb_port_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 phy;

	if (!tb_port_is_null(port))
		return -EINVAL;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		phy &= ~LANE_ADP_CS_1_LD;
	else
		phy |= LANE_ADP_CS_1_LD;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/**
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 *
 * This is used to enable a lane 0 or 1 adapter.
 */
int tb_port_enable(struct tb_port *port)
{
	return __tb_port_enable(port, true);
}

/**
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 *
 * This is used to disable a lane 0 or 1 adapter.
 */
int tb_port_disable(struct tb_port *port)
{
	return __tb_port_enable(port, false);
}

/**
 * tb_init_port() - initialize a port
 *
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_init_port(struct tb_port *port)
{
	int res;
	int cap;

	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res) {
		if (res == -ENODEV) {
			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
			       port->port);
			port->disabled = true;
			return 0;
		}
		return res;
	}

	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);

		if (cap > 0)
			port->cap_phy = cap;
		else
			tb_port_WARN(port, "non switch port without a PHY\n");

		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
		if (cap > 0)
			port->cap_usb4 = cap;
	} else if (port->port != 0) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
		if (cap > 0)
			port->cap_adap = cap;
	}

	tb_dump_port(port->sw->tb, &port->config);

	INIT_LIST_HEAD(&port->list);
	return 0;
}

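/*
 * Allocate a HopID from the port's input or output IDA. The range is
 * clamped to what the adapter supports and, for non-NHI adapters, to
 * the reserved minimum TB_PATH_MIN_HOPID.
 */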
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int max_hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) int port_max_hopid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct ida *ida;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) port_max_hopid = port->config.max_in_hop_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ida = &port->in_hopids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) port_max_hopid = port->config.max_out_hop_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) ida = &port->out_hopids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) min_hopid = TB_PATH_MIN_HOPID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (max_hopid < 0 || max_hopid > port_max_hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) max_hopid = port_max_hopid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * tb_port_alloc_in_hopid() - Allocate input HopID from port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * @port: Port to allocate HopID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * @min_hopid: Minimum acceptable input HopID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * @max_hopid: Maximum acceptable input HopID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * Return: HopID between @min_hopid and @max_hopid or negative errno in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * tb_port_alloc_out_hopid() - Allocate output HopID from port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * @port: Port to allocate HopID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @min_hopid: Minimum acceptable output HopID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * @max_hopid: Maximum acceptable output HopID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Return: HopID between @min_hopid and @max_hopid or negative errno in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * tb_port_release_in_hopid() - Release allocated input HopID from port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * @port: Port whose HopID to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * @hopid: HopID to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) void tb_port_release_in_hopid(struct tb_port *port, int hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ida_simple_remove(&port->in_hopids, hopid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * tb_port_release_out_hopid() - Release allocated output HopID from port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * @port: Port whose HopID to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * @hopid: HopID to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) void tb_port_release_out_hopid(struct tb_port *port, int hopid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ida_simple_remove(&port->out_hopids, hopid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
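
/*
 * Example (illustrative sketch, not part of the driver): path setup
 * typically allocates a HopID on the input side and releases it again
 * on teardown. The minimum of 8 is an assumption for the example;
 * passing a negative maximum lets the port define the upper bound:
 *
 *	int hopid = tb_port_alloc_in_hopid(port, 8, -1);
 *	if (hopid < 0)
 *		return hopid;
 *	...
 *	tb_port_release_in_hopid(port, hopid);
 */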
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) const struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u64 mask = (1ULL << parent->config.depth * 8) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return (tb_route(parent) & mask) == (tb_route(sw) & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * tb_next_port_on_path() - Return next port for given port on a path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * @start: Start port of the walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * @end: End port of the walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * @prev: Previous port (%NULL if this is the first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns the other end
 * of that same link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * If the @end port has been reached, return %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Domain tb->lock must be held when this function is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct tb_port *prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct tb_port *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (prev->sw == end->sw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (prev == end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (tb_switch_is_reachable(prev->sw, end->sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) next = tb_port_at(tb_route(end->sw), prev->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* Walk down the topology if next == prev */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (prev->remote &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) (next == prev || next->dual_link_port == prev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) next = prev->remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (tb_is_upstream_port(prev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) next = prev->remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) next = tb_upstream_port(prev->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * Keep the same link if prev and next are both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * dual link ports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (next->dual_link_port &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) next->link_nr != prev->link_nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) next = next->dual_link_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return next != prev ? next : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
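
/*
 * Example (sketch): iterating over all ports between two endpoints.
 * This is the usual walking pattern, with the domain tb->lock held;
 * "src" and "dst" are assumed endpoint adapters:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		tb_port_dbg(p, "port is on the path\n");
 */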
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * tb_port_get_link_speed() - Get current link speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * @port: Port to check (USB4 or CIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) *
 * Return: Link speed in Gb/s or negative errno in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int tb_port_get_link_speed(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) u32 val, speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (!port->cap_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) port->cap_phy + LANE_ADP_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static int tb_port_get_link_width(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!port->cap_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) port->cap_phy + LANE_ADP_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
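
/*
 * Example (sketch): the two helpers above can be combined to estimate
 * the raw link bandwidth, relying on all lanes running at the same
 * speed as the code here assumes:
 *
 *	int speed = tb_port_get_link_speed(port);
 *	int width = tb_port_get_link_width(port);
 *	int bandwidth = 0;
 *
 *	if (speed > 0 && width > 0)
 *		bandwidth = speed * width;	(in Gb/s)
 */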
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) static bool tb_port_is_width_supported(struct tb_port *port, int width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) u32 phy, widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (!port->cap_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) ret = tb_port_read(port, &phy, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) port->cap_phy + LANE_ADP_CS_0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return !!(widths & width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (!port->cap_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) ret = tb_port_read(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) port->cap_phy + LANE_ADP_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) switch (width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) val |= LANE_ADP_CS_1_LB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return tb_port_write(port, &val, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) port->cap_phy + LANE_ADP_CS_1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) static int tb_port_lane_bonding_enable(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /*
	 * Enable lane bonding for both links if not already enabled
	 * by, for example, the boot firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = tb_port_get_link_width(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ret = tb_port_set_link_width(port, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ret = tb_port_get_link_width(port->dual_link_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (ret == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ret = tb_port_set_link_width(port->dual_link_port, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) tb_port_set_link_width(port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) port->bonded = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) port->dual_link_port->bonded = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) static void tb_port_lane_bonding_disable(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) port->dual_link_port->bonded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) port->bonded = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tb_port_set_link_width(port->dual_link_port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) tb_port_set_link_width(port, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) }
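
/*
 * Example (sketch): bonding is only worth attempting when both ends of
 * the link advertise dual-lane support; "up" is assumed to be the
 * upstream lane 0 adapter of the link:
 *
 *	if (tb_port_is_width_supported(up, 2) &&
 *	    tb_port_is_width_supported(up->remote, 2)) {
 *		ret = tb_port_lane_bonding_enable(up);
 *		if (ret)
 *			tb_port_warn(up, "failed to enable lane bonding\n");
 *	}
 */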
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * tb_port_is_enabled() - Is the adapter port enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * @port: Port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) bool tb_port_is_enabled(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) switch (port->config.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case TB_TYPE_PCIE_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) case TB_TYPE_PCIE_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return tb_pci_port_is_enabled(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) case TB_TYPE_DP_HDMI_IN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) case TB_TYPE_DP_HDMI_OUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) return tb_dp_port_is_enabled(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) case TB_TYPE_USB3_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case TB_TYPE_USB3_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return tb_usb3_port_is_enabled(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * @port: USB3 adapter port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) bool tb_usb3_port_is_enabled(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (tb_port_read(port, &data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) port->cap_adap + ADP_USB3_CS_0, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return !!(data & ADP_USB3_CS_0_PE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * tb_usb3_port_enable() - Enable USB3 adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * @port: USB3 adapter port to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * @enable: Enable/disable the USB3 adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) int tb_usb3_port_enable(struct tb_port *port, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) : ADP_USB3_CS_0_V;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!port->cap_adap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) return tb_port_write(port, &word, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) port->cap_adap + ADP_USB3_CS_0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
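
/*
 * Example (sketch): a USB3 tunnel is activated by enabling the adapter
 * on both ends; "up" and "down" are assumed to be the USB3 adapters of
 * the tunnel:
 *
 *	ret = tb_usb3_port_enable(up, true);
 *	if (ret)
 *		return ret;
 *	ret = tb_usb3_port_enable(down, true);
 *	if (ret)
 *		tb_usb3_port_enable(up, false);
 */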
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * @port: PCIe port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) bool tb_pci_port_is_enabled(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (tb_port_read(port, &data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) port->cap_adap + ADP_PCIE_CS_0, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return !!(data & ADP_PCIE_CS_0_PE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * tb_pci_port_enable() - Enable PCIe adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * @port: PCIe port to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * @enable: Enable/disable the PCIe adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) int tb_pci_port_enable(struct tb_port *port, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (!port->cap_adap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return tb_port_write(port, &word, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) port->cap_adap + ADP_PCIE_CS_0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
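
/*
 * Example (sketch): during discovery a PCIe tunnel set up earlier, for
 * instance by the boot firmware, can be detected by checking the
 * downstream adapter state:
 *
 *	if (tb_pci_port_is_enabled(down))
 *		... reconstruct the existing tunnel instead of a new one
 */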
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * tb_dp_port_hpd_is_active() - Is HPD already active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * @port: DP out port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) *
 * Checks if the DP OUT adapter port has the HPD (hot plug detect) bit
 * already set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int tb_dp_port_hpd_is_active(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ret = tb_port_read(port, &data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) port->cap_adap + ADP_DP_CS_2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return !!(data & ADP_DP_CS_2_HDP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * @port: Port to clear HPD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) *
 * If the DP IN port has the HPD bit set, this function can be used to
 * clear it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) int tb_dp_port_hpd_clear(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ret = tb_port_read(port, &data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) port->cap_adap + ADP_DP_CS_3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) data |= ADP_DP_CS_3_HDPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return tb_port_write(port, &data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) port->cap_adap + ADP_DP_CS_3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) }
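
/*
 * Example (sketch): before establishing a new DP tunnel the HPD status
 * of the DP IN adapter is typically checked and cleared; "in" is an
 * assumed DP IN adapter:
 *
 *	if (tb_dp_port_hpd_is_active(in) > 0)
 *		tb_dp_port_hpd_clear(in);
 */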
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * @port: DP IN/OUT port to set hops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * @video: Video Hop ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * @aux_tx: AUX TX Hop ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * @aux_rx: AUX RX Hop ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) *
 * Programs the specified Hop IDs for the DP IN/OUT port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) unsigned int aux_tx, unsigned int aux_rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u32 data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret = tb_port_read(port, data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) ADP_DP_CS_0_VIDEO_HOPID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ADP_DP_CS_1_AUX_RX_HOPID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) return tb_port_write(port, data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * tb_dp_port_is_enabled() - Is DP adapter port enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * @port: DP adapter port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) bool tb_dp_port_is_enabled(struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) u32 data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) ARRAY_SIZE(data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * tb_dp_port_enable() - Enables/disables DP paths of a port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * @port: DP IN/OUT port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * @enable: Enable/disable DP path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) *
 * Once the Hop IDs are programmed, DP paths can be enabled or disabled
 * by calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int tb_dp_port_enable(struct tb_port *port, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) u32 data[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ret = tb_port_read(port, data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return tb_port_write(port, data, TB_CFG_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
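
/*
 * Example (sketch): activating a DP tunnel once the path HopIDs have
 * been allocated; the HopID variables are illustrative:
 *
 *	ret = tb_dp_port_set_hops(in, video_hopid, aux_tx_hopid,
 *				  aux_rx_hopid);
 *	if (!ret)
 *		ret = tb_dp_port_enable(in, true);
 */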
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* switch utility functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static const char *tb_switch_generation_name(const struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) switch (sw->generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) return "Thunderbolt 1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return "Thunderbolt 2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return "Thunderbolt 3";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return "USB4";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return "Unknown";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) const struct tb_regs_switch_header *regs = &sw->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) regs->revision, regs->thunderbolt_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) tb_dbg(tb, " Config:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) tb_dbg(tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) regs->upstream_port_number, regs->depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) (((u64) regs->route_hi) << 32) | regs->route_lo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) regs->enabled, regs->plug_events_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) regs->__unknown1, regs->__unknown4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) /**
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * @sw: Switch to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) *
 * Return: 0 on success or an error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int tb_switch_reset(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct tb_cfg_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (sw->generation > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) tb_sw_dbg(sw, "resetting switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) TB_CFG_SWITCH, 2, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (res.err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return res.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (res.err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return res.err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /**
 * tb_plug_events_active() - enable/disable plug events on a switch
 * @sw: Switch whose plug events to configure
 * @active: Whether the plug events should be enabled
 *
 * Also configures a sane plug_events_delay of 255ms.
 *
 * Return: 0 on success or an error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static int tb_plug_events_active(struct tb_switch *sw, bool active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) u32 data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) sw->config.plug_events_delay = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (active) {
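		/*
		 * Clear bits 2..6 to activate the plug events. Bit 2
		 * is set again below for everything except the legacy
		 * controllers listed.
		 */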
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) data = data & 0xFFFFFF83;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) switch (sw->config.device_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) data |= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) } else {
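		/* Bits 2..6 (0x7c) are set when plug events are disabled */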
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) data = data | 0x7c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return tb_sw_write(sw, &data, TB_CFG_SWITCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) sw->cap_plug_events + 1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static ssize_t authorized_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return sprintf(buf, "%u\n", sw->authorized);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (!mutex_trylock(&sw->tb->lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return restart_syscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (sw->authorized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) switch (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Approve switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (sw->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ret = tb_domain_approve_switch_key(sw->tb, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ret = tb_domain_approve_switch(sw->tb, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) /* Challenge switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (sw->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) ret = tb_domain_challenge_switch_key(sw->tb, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) sw->authorized = val;
		/* Notify userspace about the status change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) mutex_unlock(&sw->tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static ssize_t authorized_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) ret = kstrtouint(buf, 0, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (val > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) pm_runtime_get_sync(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) ret = tb_switch_set_authorized(sw, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) pm_runtime_mark_last_busy(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) pm_runtime_put_autosuspend(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static DEVICE_ATTR_RW(authorized);
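
/*
 * Example (userspace side, sketch): with the "user" security level a
 * device is authorized by writing to this attribute; the device name
 * below is illustrative:
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/authorized
 *
 * Writing 2 instead challenges the device with the stored key.
 */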
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return sprintf(buf, "%u\n", sw->boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static DEVICE_ATTR_RO(boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static ssize_t device_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) return sprintf(buf, "%#x\n", sw->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static DEVICE_ATTR_RO(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static DEVICE_ATTR_RO(device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) generation_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) return sprintf(buf, "%u\n", sw->generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) static DEVICE_ATTR_RO(generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static ssize_t key_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) if (!mutex_trylock(&sw->tb->lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return restart_syscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (sw->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) ret = sprintf(buf, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) mutex_unlock(&sw->tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static ssize_t key_store(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) u8 key[TB_SWITCH_KEY_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) ssize_t ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) bool clear = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!strcmp(buf, "\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) clear = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) else if (hex2bin(key, buf, sizeof(key)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (!mutex_trylock(&sw->tb->lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return restart_syscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (sw->authorized) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) kfree(sw->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (clear) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) sw->key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (!sw->key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) mutex_unlock(&sw->tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static DEVICE_ATTR(key, 0600, key_show, key_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /*
 * Currently all lanes must run at the same speed but we expose both
 * directions here to allow possible asymmetric links in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return sprintf(buf, "%u\n", sw->link_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) /*
 * Currently the link has the same number of lanes in both directions
 * (1 or 2) but we expose them separately to allow possible asymmetric
 * links in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static ssize_t nvm_authenticate_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) nvm_get_auth_status(sw, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return sprintf(buf, "%#x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) bool disconnect)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) pm_runtime_get_sync(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (!mutex_trylock(&sw->tb->lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ret = restart_syscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) goto exit_rpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
	/* Bail out if the NVMem devices have not been added yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (!sw->nvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ret = kstrtoint(buf, 10, &val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* Always clear the authentication status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) nvm_clear_auth_status(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (val > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!sw->nvm->flushed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (!sw->nvm->buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ret = nvm_validate_and_write(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (ret || val == WRITE_ONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) goto exit_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) if (val == WRITE_AND_AUTHENTICATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (disconnect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) ret = tb_lc_force_power(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) sw->nvm->authenticating = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) ret = nvm_authenticate(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) exit_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) mutex_unlock(&sw->tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) exit_rpm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) pm_runtime_mark_last_busy(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) pm_runtime_put_autosuspend(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) static ssize_t nvm_authenticate_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int ret = nvm_authenticate_sysfs(dev, buf, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) static DEVICE_ATTR_RW(nvm_authenticate);
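
/*
 * Example (userspace side, sketch): once the new image has been
 * written to the non-active NVM device, writing 1 here flushes it to
 * the switch and starts authentication while 2 only flushes it; the
 * device name is illustrative:
 *
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-1/nvm_authenticate
 */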
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return nvm_authenticate_show(dev, attr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) ret = nvm_authenticate_sysfs(dev, buf, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) return ret ? ret : count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) static ssize_t nvm_version_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) if (!mutex_trylock(&sw->tb->lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) return restart_syscall();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (sw->safe_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ret = -ENODATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) else if (!sw->nvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) mutex_unlock(&sw->tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) static DEVICE_ATTR_RO(nvm_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return sprintf(buf, "%#x\n", sw->vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static DEVICE_ATTR_RO(vendor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static DEVICE_ATTR_RO(vendor_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return sprintf(buf, "%pUb\n", sw->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) static DEVICE_ATTR_RO(unique_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) static struct attribute *switch_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) &dev_attr_authorized.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) &dev_attr_boot.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) &dev_attr_device.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) &dev_attr_device_name.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) &dev_attr_generation.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) &dev_attr_key.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) &dev_attr_nvm_authenticate.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) &dev_attr_nvm_authenticate_on_disconnect.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) &dev_attr_nvm_version.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) &dev_attr_rx_speed.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) &dev_attr_rx_lanes.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) &dev_attr_tx_speed.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) &dev_attr_tx_lanes.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) &dev_attr_vendor.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) &dev_attr_vendor_name.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) &dev_attr_unique_id.attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
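/*
 * Not every attribute makes sense for every switch: the key is only
 * relevant in secure mode, the link attributes and boot only apply to
 * devices behind the host router, and the NVM attributes require the
 * NVM to be readable/upgradeable. In safe mode all remaining
 * attributes are hidden.
 */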
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) static umode_t switch_attr_is_visible(struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) struct attribute *attr, int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct device *dev = kobj_to_dev(kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) if (attr == &dev_attr_device.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) if (!sw->device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) } else if (attr == &dev_attr_device_name.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (!sw->device_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) } else if (attr == &dev_attr_vendor.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (!sw->vendor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) } else if (attr == &dev_attr_vendor_name.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (!sw->vendor_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) } else if (attr == &dev_attr_key.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (tb_route(sw) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) sw->tb->security_level == TB_SECURITY_SECURE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) sw->security_level == TB_SECURITY_SECURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) } else if (attr == &dev_attr_rx_speed.attr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) attr == &dev_attr_rx_lanes.attr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) attr == &dev_attr_tx_speed.attr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) attr == &dev_attr_tx_lanes.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) } else if (attr == &dev_attr_nvm_authenticate.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (nvm_upgradeable(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) } else if (attr == &dev_attr_nvm_version.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (nvm_readable(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) } else if (attr == &dev_attr_boot.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return sw->safe_mode ? 0 : attr->mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) static struct attribute_group switch_group = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) .is_visible = switch_attr_is_visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) .attrs = switch_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static const struct attribute_group *switch_groups[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) &switch_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
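/* Called by the driver core when the last reference to the device is gone */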
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) static void tb_switch_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) dma_port_free(sw->dma_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) ida_destroy(&port->in_hopids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) ida_destroy(&port->out_hopids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) kfree(sw->uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) kfree(sw->device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) kfree(sw->vendor_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) kfree(sw->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) kfree(sw->drom);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) kfree(sw->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) kfree(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /*
 * Currently we only need to provide the callbacks. Everything else is handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * in the connection manager.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (cm_ops->runtime_suspend_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) return cm_ops->runtime_suspend_switch(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) if (cm_ops->runtime_resume_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) return cm_ops->runtime_resume_switch(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) static const struct dev_pm_ops tb_switch_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct device_type tb_switch_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) .name = "thunderbolt_device",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) .release = tb_switch_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) .pm = &tb_switch_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
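/*
 * Map the device ID to the Thunderbolt hardware generation. Anything
 * that implements the USB4 config space is reported as generation 4.
 */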
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) static int tb_switch_get_generation(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) switch (sw->config.device_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) case PCI_DEVICE_ID_INTEL_ICL_NHI0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) case PCI_DEVICE_ID_INTEL_ICL_NHI1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /*
		 * For unknown switches we assume the generation to be 1
		 * to be on the safe side.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) tb_sw_warn(sw, "unsupported switch device id %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) sw->config.device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
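/*
 * The maximum allowed topology depth differs between USB4 and
 * Thunderbolt; use the USB4 limit whenever either this switch or the
 * root switch is USB4.
 */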
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int max_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (tb_switch_is_usb4(sw) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) max_depth = USB4_SWITCH_MAX_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) max_depth = TB_SWITCH_MAX_DEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return depth > max_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
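/*
 * Note on route strings: each hop towards a switch is encoded as an
 * 8-bit downstream port number, with the hop closest to the host
 * occupying the lowest byte, so tb_route_length() is the depth of the
 * switch. For example (illustrative values only), route 0x301 refers
 * to the switch behind port 1 of the host router and then port 3 of
 * the switch below it.
 */
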
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) * tb_switch_alloc() - allocate a switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * @tb: Pointer to the owning domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * @parent: Parent device for this switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * @route: Route string for this switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * Allocates and initializes a switch. Will not upload configuration to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * the switch. For that you need to call tb_switch_configure()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * separately. The returned switch should be released by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * tb_switch_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * Return: Pointer to the allocated switch or ERR_PTR() in case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) u64 route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct tb_switch *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int upstream_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) int i, ret, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) /* Unlock the downstream port so we can access the switch below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (route) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct tb_switch *parent_sw = tb_to_switch(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct tb_port *down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) down = tb_port_at(route, parent_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) tb_port_unlock(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) depth = tb_route_length(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (upstream_port < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return ERR_PTR(upstream_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) sw = kzalloc(sizeof(*sw), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (!sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) sw->tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) goto err_free_sw_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) sw->generation = tb_switch_get_generation(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) tb_dbg(tb, "current switch config:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) tb_dump_switch(tb, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /* configure switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) sw->config.upstream_port_number = upstream_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) sw->config.depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) sw->config.route_hi = upper_32_bits(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) sw->config.route_lo = lower_32_bits(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) sw->config.enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
	/* Make sure we do not exceed the maximum topology limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (tb_switch_exceeds_max_depth(sw, depth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) ret = -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) goto err_free_sw_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /* initialize ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (!sw->ports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) goto err_free_sw_ports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) for (i = 0; i <= sw->config.max_port_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) /* minimum setup for tb_find_cap and tb_drom_read to work */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) sw->ports[i].sw = sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) sw->ports[i].port = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* Control port does not need HopID allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) ida_init(&sw->ports[i].in_hopids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ida_init(&sw->ports[i].out_hopids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) sw->cap_plug_events = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) sw->cap_lc = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) /* Root switch is always authorized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) sw->authorized = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) device_initialize(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) sw->dev.parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) sw->dev.bus = &tb_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) sw->dev.type = &tb_switch_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) sw->dev.groups = switch_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) err_free_sw_ports:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) kfree(sw->ports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) kfree(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * @tb: Pointer to the owning domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * @parent: Parent device for this switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * @route: Route string for this switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) *
 * This creates a switch in safe mode. This means that the switch
 * lacks all capabilities except the DMA configuration port, until it
 * has been flashed with a valid NVM firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * The returned switch must be released by calling tb_switch_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) struct tb_switch *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct tb_switch *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) sw = kzalloc(sizeof(*sw), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if (!sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) sw->tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) sw->config.depth = tb_route_length(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) sw->config.route_hi = upper_32_bits(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) sw->config.route_lo = lower_32_bits(route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) sw->safe_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) device_initialize(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) sw->dev.parent = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) sw->dev.bus = &tb_bus_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) sw->dev.type = &tb_switch_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) sw->dev.groups = switch_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * tb_switch_configure() - Uploads configuration to the switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * @sw: Switch to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) *
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * Return: %0 in case of success and negative errno in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) int tb_switch_configure(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct tb *tb = sw->tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) u64 route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) route = tb_route(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) sw->config.enabled ? "restoring" : "initializing", route,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) tb_route_length(route), sw->config.upstream_port_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) sw->config.enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) if (tb_switch_is_usb4(sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /*
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that the router knows to expose all the
		 * additional capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) sw->config.cmuv = USB4_VERSION_1_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /* Enumerate the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) ROUTER_CS_1, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) ret = usb4_switch_setup(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) tb_sw_warn(sw, "unknown switch vendor id %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) sw->config.vendor_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) /* Enumerate the switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) ROUTER_CS_1, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) return tb_plug_events_active(sw, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
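
/*
 * Typical connection manager flow, as a sketch only (error handling
 * trimmed, variable names illustrative):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return;
 *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return;
 *	}
 *	// the switch is now visible to userspace
 */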
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
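/*
 * Derive and cache the UUID for @sw. Newer link controllers have a
 * fused UUID that can be read directly; otherwise (USB4 and older
 * hardware) the UUID is synthesized from the 64-bit UID the same way
 * the ICM firmware does it.
 */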
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static int tb_switch_set_uuid(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) bool uid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) u32 uuid[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (sw->uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (tb_switch_is_usb4(sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) ret = usb4_switch_read_uid(sw, &sw->uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) uid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) /*
		 * The newer controllers include a fused UUID as part of
		 * the link controller specific registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ret = tb_lc_read_uuid(sw, uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (ret != -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) uid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (uid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * ICM generates UUID based on UID and fills the upper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * two words with ones. This is not strictly following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * UUID format but we want to be compatible with it so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * we do the same here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) uuid[0] = sw->uid & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) uuid[1] = (sw->uid >> 32) & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) uuid[2] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) uuid[3] = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (!sw->uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
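/*
 * Set up the DMA configuration port, used for NVM upgrade, for the
 * switches that have one. Also checks the outcome of a previously
 * started NVM authentication and power cycles the switch if needed.
 */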
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) static int tb_switch_add_dma_port(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) switch (sw->generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /* Only root switch can be upgraded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) ret = tb_switch_set_uuid(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) * DMA port is the only thing available when the switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * is in safe mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) if (!sw->safe_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /* Root switch DMA port requires running firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (!tb_route(sw) && !tb_switch_is_icm(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) sw->dma_port = dma_port_alloc(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (!sw->dma_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (sw->no_nvm_upgrade)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
	/*
	 * If there is a status already set then authentication failed
	 * when dma_port_flash_update_auth() returned. Power cycling is
	 * not needed (it was done already) so the only thing we do here
	 * is to unblock runtime PM of the root port.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) nvm_get_auth_status(sw, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (!tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) nvm_authenticate_complete_dma_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) /*
	 * Check the status of the previous flash authentication. If
	 * there is one, we need to power cycle the switch in any case
	 * to make it functional again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /* Now we can allow root port to suspend again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (!tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) nvm_authenticate_complete_dma_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) tb_sw_info(sw, "switch flash authentication failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) nvm_set_auth_status(sw, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) tb_sw_info(sw, "power cycling the switch now\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) dma_port_power_cycle(sw->dma_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) /*
	 * We return an error here, which causes adding the switch to fail.
	 * It should appear back after the power cycle is complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return -ESHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
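/*
 * If the DROM did not link the lane adapters, fall back to the default
 * layout where each lane 0 adapter is immediately followed by its lane
 * 1 adapter.
 */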
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) static void tb_switch_default_link_ports(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) for (i = 1; i <= sw->config.max_port_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) struct tb_port *port = &sw->ports[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct tb_port *subordinate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!tb_port_is_null(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) /* Check for the subordinate port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (i == sw->config.max_port_number ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) !tb_port_is_null(&sw->ports[i + 1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
		/* Link them if that was not already done by the DROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) subordinate = &sw->ports[i + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (!port->dual_link_port && !subordinate->dual_link_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) port->link_nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) port->dual_link_port = subordinate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) subordinate->link_nr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) subordinate->dual_link_port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) tb_sw_dbg(sw, "linked ports %d <-> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) port->port, subordinate->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
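/*
 * Lane bonding requires the second (dual link) lane to be connected
 * and the underlying hardware to support bonding.
 */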
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) const struct tb_port *up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (!up->dual_link_port || !up->dual_link_port->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) return usb4_switch_lane_bonding_possible(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return tb_lc_lane_bonding_possible(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
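/*
 * Refresh the cached link speed and width from the upstream port and
 * notify userspace with a change uevent if either of them changed.
 */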
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static int tb_switch_update_link_attributes(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) struct tb_port *up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) bool change = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (!tb_route(sw) || tb_switch_is_icm(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) ret = tb_port_get_link_speed(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) if (sw->link_speed != ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) change = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) sw->link_speed = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ret = tb_port_get_link_width(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) if (sw->link_width != ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) change = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) sw->link_width = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
	/* Notify userspace that there is a possible link attribute change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) if (device_is_registered(&sw->dev) && change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) * tb_switch_lane_bonding_enable() - Enable lane bonding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * @sw: Switch to enable lane bonding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) *
 * The connection manager can call this function to enable lane bonding
 * of a switch. If the conditions are correct and both switches support
 * the feature, the lanes are bonded. It is safe to call this for any switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int tb_switch_lane_bonding_enable(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct tb_switch *parent = tb_to_switch(sw->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) struct tb_port *up, *down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) u64 route = tb_route(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (!route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (!tb_switch_lane_bonding_possible(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) down = tb_port_at(route, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (!tb_port_is_width_supported(up, 2) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) !tb_port_is_width_supported(down, 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) ret = tb_port_lane_bonding_enable(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) tb_port_warn(up, "failed to enable lane bonding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) ret = tb_port_lane_bonding_enable(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) tb_port_warn(down, "failed to enable lane bonding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) tb_port_lane_bonding_disable(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) tb_switch_update_link_attributes(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) tb_sw_dbg(sw, "lane bonding enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * tb_switch_lane_bonding_disable() - Disable lane bonding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) * @sw: Switch whose lane bonding to disable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) *
 * Disables lane bonding between @sw and its parent. This can be called
 * even if the lanes were not bonded originally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) void tb_switch_lane_bonding_disable(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct tb_switch *parent = tb_to_switch(sw->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct tb_port *up, *down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (!tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) if (!up->bonded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) down = tb_port_at(tb_route(sw), parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) tb_port_lane_bonding_disable(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) tb_port_lane_bonding_disable(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) tb_switch_update_link_attributes(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) tb_sw_dbg(sw, "lane bonding disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * tb_switch_configure_link() - Set link configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * @sw: Switch whose link is configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) *
 * Sets the link upstream from @sw as configured (from both ends) so
 * that it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Return: %0 on success and negative errno in case of error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) int tb_switch_configure_link(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) struct tb_port *up, *down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) if (!tb_route(sw) || tb_switch_is_icm(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) if (tb_switch_is_usb4(up->sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) ret = usb4_port_configure(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) ret = tb_lc_configure_port(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) down = up->remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) if (tb_switch_is_usb4(down->sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) return usb4_port_configure(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) return tb_lc_configure_port(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * tb_switch_unconfigure_link() - Unconfigure link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * @sw: Switch whose link is unconfigured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) *
 * Sets the link unconfigured so that @sw will be disconnected if the
 * domain exits sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) void tb_switch_unconfigure_link(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) struct tb_port *up, *down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (sw->is_unplugged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (!tb_route(sw) || tb_switch_is_icm(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) up = tb_upstream_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (tb_switch_is_usb4(up->sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) usb4_port_unconfigure(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) tb_lc_unconfigure_port(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) down = up->remote;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (tb_switch_is_usb4(down->sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) usb4_port_unconfigure(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) tb_lc_unconfigure_port(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * tb_switch_add() - Add a switch to the domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * @sw: Switch to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) *
 * This is the last step in adding a switch to the domain. It will read
 * identification information from the DROM and initialize the ports so
 * that they can be used to connect other switches. The switch is
 * exposed to userspace when this function returns successfully. To
 * remove and release the switch, call tb_switch_remove().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * Return: %0 in case of success and negative errno in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) int tb_switch_add(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
	/*
	 * Initialize the DMA control port now, before we read the DROM.
	 * Recent host controllers have a more complete DROM in NVM that
	 * includes vendor and model identification strings, which we
	 * then expose to userspace. The NVM can be accessed through the
	 * DMA configuration based mailbox.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) ret = tb_switch_add_dma_port(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) dev_err(&sw->dev, "failed to add DMA port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (!sw->safe_mode) {
		/* Read the DROM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) ret = tb_drom_read(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) dev_err(&sw->dev, "reading DROM failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) ret = tb_switch_set_uuid(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) dev_err(&sw->dev, "failed to set UUID\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) for (i = 0; i <= sw->config.max_port_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) if (sw->ports[i].disabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) ret = tb_init_port(&sw->ports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) dev_err(&sw->dev, "failed to initialize port %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) tb_switch_default_link_ports(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) ret = tb_switch_update_link_attributes(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) ret = tb_switch_tmu_init(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) ret = device_add(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) dev_err(&sw->dev, "failed to add device: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (tb_route(sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) sw->vendor, sw->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (sw->vendor_name && sw->device_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) sw->device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) ret = tb_switch_nvm_add(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) dev_err(&sw->dev, "failed to add NVM devices\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) device_del(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * Thunderbolt routers do not generate wakeups themselves but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * they forward wakeups from tunneled protocols, so enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * device wakeup here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) device_init_wakeup(&sw->dev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) pm_runtime_set_active(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (sw->rpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) pm_runtime_use_autosuspend(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) pm_runtime_mark_last_busy(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) pm_runtime_enable(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) pm_request_autosuspend(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) tb_switch_debugfs_init(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
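
/*
* Illustrative sketch (not part of the driver): a typical caller pairs
* tb_switch_alloc() and tb_switch_configure() with tb_switch_add() and
* drops the reference on failure. Error handling is abbreviated:
*
*	sw = tb_switch_alloc(tb, &parent->dev, route);
*	if (IS_ERR(sw))
*		return PTR_ERR(sw);
*
*	ret = tb_switch_configure(sw);
*	if (!ret)
*		ret = tb_switch_add(sw);
*	if (ret)
*		tb_switch_put(sw);
*/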
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * tb_switch_remove() - Remove and release a switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * @sw: Switch to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * This will remove the switch from the domain and release it after last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) * reference count drops to zero. If there are switches connected below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) * this switch, they will be removed as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) void tb_switch_remove(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) tb_switch_debugfs_remove(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) if (sw->rpm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) pm_runtime_get_sync(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) pm_runtime_disable(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) /* port 0 is the switch itself and never has a remote */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) if (tb_port_has_remote(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) tb_switch_remove(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) port->remote = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) } else if (port->xdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) tb_xdomain_remove(port->xdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) port->xdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) /* Remove any downstream retimers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) tb_retimer_remove_all(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (!sw->is_unplugged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) tb_plug_events_active(sw, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) tb_switch_nvm_remove(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) dev_info(&sw->dev, "device disconnected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) device_unregister(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
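
/*
* Illustrative sketch (an assumption about caller context): because
* removal recurses into the children, tearing down the whole domain
* only needs the root switch:
*
*	tb_switch_remove(tb->root_switch);
*/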
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
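* @sw: Switch to mark as unplugged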
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) void tb_sw_set_unplugged(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) if (sw == sw->tb->root_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) tb_sw_WARN(sw, "cannot unplug root switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (sw->is_unplugged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) tb_sw_WARN(sw, "is_unplugged already set\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) sw->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) if (tb_port_has_remote(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) tb_sw_set_unplugged(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) else if (port->xdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) port->xdomain->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
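/*
* Enable wakes for the given @flags, or disable all wakes when @flags
* is zero. Dispatches to the USB4 implementation or to the legacy
* link controller one depending on the router type.
*/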
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) tb_sw_dbg(sw, "disabling wakeup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) return usb4_switch_set_wake(sw, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) return tb_lc_set_wake(sw, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601)
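/**
* tb_switch_resume() - Resume a switch after sleep
* @sw: Switch to resume
*
* Resumes @sw after sleep: verifies that the router is still there by
* reading its UID (except for the root switch), re-configures it,
* disables wakes, re-initializes the TMU and then resumes the
* surviving downstream switches. Switches and XDomains that were lost
* during suspend are marked unplugged.
*
* Return: %0 in case of success and negative errno in case of failure
*/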
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) int tb_switch_resume(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) tb_sw_dbg(sw, "resuming switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * Check the UID of the connected switches, except for the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * switch, which we assume cannot be removed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (tb_route(sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) u64 uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) * Check first that we can still read the switch config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) * space. It may be that there is now another domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) tb_sw_info(sw, "switch not present anymore\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) err = usb4_switch_read_uid(sw, &uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) err = tb_drom_read_uid_only(sw, &uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) tb_sw_warn(sw, "uid read failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) if (sw->uid != uid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) tb_sw_info(sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) "changed while suspended (uid %#llx -> %#llx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) sw->uid, uid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) err = tb_switch_configure(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) /* Disable wakes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) tb_switch_set_wake(sw, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) err = tb_switch_tmu_init(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /* check for surviving downstream switches */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) if (!tb_port_has_remote(port) && !port->xdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) if (tb_wait_for_port(port, true) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) tb_port_warn(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) "lost during suspend, disconnecting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) if (tb_port_has_remote(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) tb_sw_set_unplugged(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) else if (port->xdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) port->xdomain->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) } else if (tb_port_has_remote(port) || port->xdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * Always unlock the port so the downstream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) * switch/domain is accessible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (tb_port_unlock(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) tb_port_warn(port, "failed to unlock port\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) if (port->remote && tb_switch_resume(port->remote->sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) tb_port_warn(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) "lost during suspend, disconnecting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) tb_sw_set_unplugged(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * tb_switch_suspend() - Put a switch to sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * @sw: Switch to suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * @runtime: Is this runtime suspend or system sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * Suspends the router and all its children. Enables wakes according
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * to the value of @runtime and then sets the sleep bit for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) * router. If @sw is the host router, the domain is ready to go to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) * sleep once this function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) void tb_switch_suspend(struct tb_switch *sw, bool runtime)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) unsigned int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) tb_sw_dbg(sw, "suspending switch\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) err = tb_plug_events_active(sw, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) if (tb_port_has_remote(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) tb_switch_suspend(port->remote->sw, runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) if (runtime) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) /* Trigger wake when something is plugged in/out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) } else if (device_may_wakeup(&sw->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) tb_switch_set_wake(sw, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) usb4_switch_set_sleep(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) tb_lc_set_sleep(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
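
/*
* Illustrative sketch (an assumption about caller context): system
* sleep and runtime suspend differ only in the @runtime argument,
* which selects the wake flags programmed above:
*
*	tb_switch_suspend(tb->root_switch, false);	system sleep
*	tb_switch_suspend(sw, true);			runtime suspend
*/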
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * tb_switch_query_dp_resource() - Query availability of DP resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) * @sw: Switch whose DP resource is queried
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) * @in: DP IN port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) * Queries availability of a DP resource for DP tunneling using switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) * specific means. Returns %true if the resource is available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) return usb4_switch_query_dp_resource(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return tb_lc_dp_sink_query(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) * tb_switch_alloc_dp_resource() - Allocate available DP resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) * @sw: Switch whose DP resource is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * @in: DP IN port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) * Allocates a DP resource for DP tunneling. The resource must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * available for this to succeed (see tb_switch_query_dp_resource()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * Returns %0 in case of success and negative errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) return usb4_switch_alloc_dp_resource(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) return tb_lc_dp_sink_alloc(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) * tb_switch_dealloc_dp_resource() - De-allocate DP resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) * @sw: Switch whose DP resource is de-allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) * @in: DP IN port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * De-allocates DP resource that was previously allocated for DP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * tunneling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (tb_switch_is_usb4(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) ret = usb4_switch_dealloc_dp_resource(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) ret = tb_lc_dp_sink_dealloc(sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) in->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
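
/*
* Illustrative sketch (not part of the driver): the three DP resource
* helpers above are meant to be used together when setting up a DP
* tunnel from DP IN port @in (the -EBUSY error code is a hypothetical
* choice):
*
*	if (!tb_switch_query_dp_resource(sw, in))
*		return -EBUSY;
*
*	ret = tb_switch_alloc_dp_resource(sw, in);
*	if (ret)
*		return ret;
*
*	... establish the DP tunnel ...
*
*	tb_switch_dealloc_dp_resource(sw, in);	on teardown
*/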
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) struct tb_sw_lookup {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct tb *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) u8 link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) u8 depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) const uuid_t *uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) u64 route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
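/*
* Matching is done in precedence order: a non-NULL @uuid wins over
* @route, which in turn wins over @link/@depth (a zero @depth matches
* only the root switch).
*/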
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) static int tb_switch_match(struct device *dev, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) const struct tb_sw_lookup *lookup = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (!sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) if (sw->tb != lookup->tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) if (lookup->uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (lookup->route) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return sw->config.route_lo == lower_32_bits(lookup->route) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) sw->config.route_hi == upper_32_bits(lookup->route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) /* Root switch is matched only by depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (!lookup->depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) return !sw->depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) return sw->link == lookup->link && sw->depth == lookup->depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) * tb_switch_find_by_link_depth() - Find switch by link and depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) * @tb: Domain the switch belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) * @link: Link number the switch is connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) * @depth: Depth of the switch in the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * Returned switch has reference count increased so the caller needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) * call tb_switch_put() when done with the switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) struct tb_sw_lookup lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) memset(&lookup, 0, sizeof(lookup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) lookup.tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) lookup.link = link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) lookup.depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) return tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * tb_switch_find_by_uuid() - Find switch by UUID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) * @tb: Domain the switch belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) * @uuid: UUID to look for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * Returned switch has reference count increased so the caller needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * call tb_switch_put() when done with the switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) struct tb_sw_lookup lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) memset(&lookup, 0, sizeof(lookup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) lookup.tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) lookup.uuid = uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) return tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * tb_switch_find_by_route() - Find switch by route string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) * @tb: Domain the switch belongs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) * @route: Route string to look for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) * Returned switch has reference count increased so the caller needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) * call tb_switch_put() when done with the switch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) struct tb_sw_lookup lookup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (!route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) return tb_switch_get(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) memset(&lookup, 0, sizeof(lookup));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) lookup.tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) lookup.route = route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) return tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) }
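
/*
* Illustrative sketch (not part of the driver): all three lookup
* helpers above return the switch with its reference count increased,
* so pair every successful lookup with tb_switch_put():
*
*	struct tb_switch *sw;
*
*	sw = tb_switch_find_by_route(tb, route);
*	if (sw) {
*		... use the switch ...
*		tb_switch_put(sw);
*	}
*/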
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) * tb_switch_find_port() - Return the first port of @type on @sw or %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) * @sw: Switch to find the port from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) * @type: Port type to look for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) struct tb_port *tb_switch_find_port(struct tb_switch *sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) enum tb_port_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (port->config.type == type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) return port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
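
/*
* Illustrative sketch (not part of the driver): for example, locating
* the first DP IN adapter of a router before querying DP resource
* availability:
*
*	struct tb_port *in;
*
*	in = tb_switch_find_port(sw, TB_TYPE_DP_HDMI_IN);
*	if (in && tb_switch_query_dp_resource(sw, in))
*		... DP tunneling from this router is possible ...
*/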