// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

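/*
 * NVMem read callback for the active retimer NVM. Holds a runtime PM
 * reference over the sideband access and restarts the syscall if the
 * domain lock cannot be taken.
 */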
static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

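/*
 * NVMem write callback for the non-active retimer NVM. The data is only
 * buffered here; it is written to the retimer flash when the image is
 * authenticated through the nvm_authenticate attribute.
 */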
static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
				size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

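/*
 * Reads the NVM version and flash size over the USB4 sideband and
 * registers the active (read) and non-active (write) NVMem devices for
 * the retimer.
 */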
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	u32 val, nvm_size;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
					 sizeof(val));
	if (ret)
		goto err_nvm;

	nvm->major = val >> 16;
	nvm->minor = val >> 8;

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
					 &val, sizeof(val));
	if (ret)
		goto err_nvm;

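	/*
	 * The FLASH_SIZE field presumably encodes the flash size in Mbit
	 * units: 1/8 of the reported size, minus a 16k reserved region,
	 * split evenly between the active and non-active halves. For
	 * example, val & 7 == 0 gives (SZ_1M / 8 - SZ_16K) / 2 = 56k of
	 * active NVM.
	 */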
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;

	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

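/*
 * Performs basic sanity checks on the image in the NVM write buffer
 * (size limits, FARB pointer, digital section alignment and size,
 * device ID match) and then writes it to the non-active retimer NVM.
 */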
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;

	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					   image_size);
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

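/*
 * Writing 1 validates the buffered image, writes it to the non-active
 * NVM and asks the retimer to authenticate it. Writing 0 only clears
 * the previous authentication status.
 */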
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	bool val;
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		if (!rt->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = tb_retimer_nvm_validate_and_write(rt);
		if (ret)
			goto exit_unlock;

		ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

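/*
 * Probes the retimer at the given index behind the USB4 port over the
 * sideband and, if the vendor is recognized and NVM operations are
 * supported, registers it on the Thunderbolt bus with runtime PM
 * enabled.
 */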
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->sw->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

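/* Tears down the NVM devices and unregisters the retimer device */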
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

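/* Match callback for device_find_child(), keyed by port and index */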
static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

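/*
 * Returns the registered retimer at the given index behind the port, or
 * NULL if not found. The reference taken by device_find_child() is
 * passed to the caller, who is expected to drop it with put_device().
 */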
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->sw->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 *
 * Tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port. Does not scan for cable
 * retimers for now.
 */
int tb_retimer_scan(struct tb_port *port)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	if (!port->cap_usb4)
		return 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}
	}

	return 0;
}

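/* device_for_each_child_reverse() callback used by tb_retimer_remove_all() */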
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	if (port->cap_usb4)
		device_for_each_child_reverse(&port->sw->dev, port,
					      remove_retimer);
}