// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

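/* Read the link controller common descriptor register (TB_LC_DESC) */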
static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

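/*
 * Return the config space offset of the link controller registers for
 * the physical port behind @port, or a negative errno on failure. Each
 * physical port has its own block of LC port registers, starting at the
 * offset given in the LC descriptor.
 */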
static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}

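/*
 * Set or clear the "lane configured" bit (and the upstream bit for the
 * upstream port) in the port's LC_SX_CTRL register.
 */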
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

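/*
 * Set or clear the "lane XDomain configured" bit in the port's
 * LC_SX_CTRL register.
 */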
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain so that the LC knows about
 * it. Returns %0 on success and negative errno on failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

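/*
 * Program the wake-on bits of the link controller at config space
 * offset @offset according to @flags. Passing %0 clears all wakes.
 */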
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Enable wake on PCIe and USB4 (wake coming from another
	 * router).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
		  TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * Sets the wake bits of each link controller accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether conditions for lane bonding from parent to @sw are
 * possible.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

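/*
 * Map a DP IN port to its sink number: returns %0 if @in is the first
 * DP IN adapter of @sw, %1 for any other DP IN adapter, and negative
 * errno if the switch has no DP IN adapters at all.
 */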
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

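/*
 * Return %0 if the given sink is free for the connection manager to
 * use (allocation field is %0 or already set to CM), %-EBUSY otherwise.
 */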
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

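	/* Writing all ones to TB_LC_POWER forces the LC to be powered on */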
	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}