// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @dp_resources: List of available DP resources for DP tunneling
 * @hotplug_active: tb_handle_hotplug will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain wq
 *		    after cfg has been paused.
 * @remove_work: Work used to remove any unplugged routers after
 *		 runtime resume
 */
struct tb_cm {
	struct list_head tunnel_list;
	struct list_head dp_resources;
	bool hotplug_active;
	struct delayed_work remove_work;
};

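/*
 * Note: the pointer arithmetic below assumes the connection manager
 * private data is placed immediately after struct tb in the same
 * allocation (the inverse of what tb_priv() returns).
 */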
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
	return ((void *)tcm - sizeof(struct tb));
}

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};

static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
	struct tb_hotplug_event *ev;

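	/* If the allocation fails the hotplug event is silently dropped */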
	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;

	ev->tb = tb;
	ev->route = route;
	ev->port = port;
	ev->unplug = unplug;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	queue_work(tb->wq, &ev->work);
}

/* enumeration & hot plug handling */

static void tb_add_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_dpin(port))
			continue;

		if (!tb_switch_query_dp_resource(sw, port))
			continue;

		list_add_tail(&port->list, &tcm->dp_resources);
		tb_port_dbg(port, "DP IN resource available\n");
	}
}

static void tb_remove_dp_resources(struct tb_switch *sw)
{
	struct tb_cm *tcm = tb_priv(sw->tb);
	struct tb_port *port, *tmp;

	/* Clear children resources first */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_remove_dp_resources(port->remote->sw);
	}

	list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
		if (port->sw == sw) {
			tb_port_dbg(port, "DP IN resource unavailable\n");
			list_del_init(&port->list);
		}
	}
}

static void tb_discover_tunnels(struct tb_switch *sw)
{
	struct tb *tb = sw->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel = NULL;

		switch (port->config.type) {
		case TB_TYPE_DP_HDMI_IN:
			tunnel = tb_tunnel_discover_dp(tb, port);
			break;

		case TB_TYPE_PCIE_DOWN:
			tunnel = tb_tunnel_discover_pci(tb, port);
			break;

		case TB_TYPE_USB3_DOWN:
			tunnel = tb_tunnel_discover_usb3(tb, port);
			break;

		default:
			break;
		}

		if (!tunnel)
			continue;

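		/*
		 * Walk up the path of a discovered PCIe tunnel and mark
		 * each router in between as boot-connected; these
		 * tunnels were presumably set up by the boot firmware.
		 */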
		if (tb_tunnel_is_pci(tunnel)) {
			struct tb_switch *parent = tunnel->dst_port->sw;

			while (parent != tunnel->src_port->sw) {
				parent->boot = true;
				parent = tb_switch_parent(parent);
			}
		} else if (tb_tunnel_is_dp(tunnel)) {
			/* Keep the domain from powering down */
			pm_runtime_get_sync(&tunnel->src_port->sw->dev);
			pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
		}

		list_add_tail(&tunnel->list, &tcm->tunnel_list);
	}

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_discover_tunnels(port->remote->sw);
	}
}

static int tb_port_configure_xdomain(struct tb_port *port)
{
	/*
	 * XDomain paths currently support only a single lane, so we
	 * must disable the other lane according to the USB4 spec.
	 */
	tb_port_disable(port->dual_link_port);

	if (tb_switch_is_usb4(port->sw))
		return usb4_port_configure_xdomain(port);
	return tb_lc_configure_xdomain(port);
}

static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
	if (tb_switch_is_usb4(port->sw))
		usb4_port_unconfigure_xdomain(port);
	else
		tb_lc_unconfigure_xdomain(port);

	tb_port_enable(port->dual_link_port);
}

static void tb_scan_xdomain(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	struct tb_xdomain *xd;
	u64 route;

	route = tb_downstream_route(port);
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		tb_xdomain_put(xd);
		return;
	}

	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
			      NULL);
	if (xd) {
		tb_port_at(route, sw)->xdomain = xd;
		tb_port_configure_xdomain(port);
		tb_xdomain_add(xd);
	}
}

static int tb_enable_tmu(struct tb_switch *sw)
{
	int ret;

	/* If it is already enabled in correct mode, don't touch it */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	ret = tb_switch_tmu_disable(sw);
	if (ret)
		return ret;

	ret = tb_switch_tmu_post_time(sw);
	if (ret)
		return ret;

	return tb_switch_tmu_enable(sw);
}

/**
 * tb_find_unused_port() - return the first inactive port on @sw
 * @sw: Switch to find the port on
 * @type: Port type to look for
 */
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
					   enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (tb_is_upstream_port(port))
			continue;
		if (port->config.type != type)
			continue;
		if (!port->cap_adap)
			continue;
		if (tb_port_is_enabled(port))
			continue;
		return port;
	}
	return NULL;
}

static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down;

	down = usb4_switch_map_usb3_down(sw, port);
	if (down && !tb_usb3_port_is_enabled(down))
		return down;
	return NULL;
}

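/*
 * Returns the first tunnel of @type whose source or destination
 * adapter matches. Either @src_port or @dst_port may be NULL, in
 * which case only the other end is compared.
 */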
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
					struct tb_port *src_port,
					struct tb_port *dst_port)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
		if (tunnel->type == type &&
		    ((src_port && src_port == tunnel->src_port) ||
		     (dst_port && dst_port == tunnel->dst_port))) {
			return tunnel;
		}
	}

	return NULL;
}

static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
						   struct tb_port *src_port,
						   struct tb_port *dst_port)
{
	struct tb_port *port, *usb3_down;
	struct tb_switch *sw;

	/* Pick the router that is deepest in the topology */
	if (dst_port->sw->config.depth > src_port->sw->config.depth)
		sw = dst_port->sw;
	else
		sw = src_port->sw;

	/* Can't be the host router */
	if (sw == tb->root_switch)
		return NULL;

	/* Find the downstream USB4 port that leads to this router */
	port = tb_port_at(tb_route(sw), tb->root_switch);
	/* Find the corresponding host router USB3 downstream port */
	usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
	if (!usb3_down)
		return NULL;

	return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}

static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
				  struct tb_port *dst_port, int *available_up,
				  int *available_down)
{
	int usb3_consumed_up, usb3_consumed_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_port *port;

	tb_port_dbg(dst_port, "calculating available bandwidth\n");

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (tunnel) {
		ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
						   &usb3_consumed_down);
		if (ret)
			return ret;
	} else {
		usb3_consumed_up = 0;
		usb3_consumed_down = 0;
	}

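	/* Start from the 40 Gb/s maximum and clamp it per link below */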
	*available_up = *available_down = 40000;

	/* Find the minimum available bandwidth over all links */
	tb_for_each_port_on_path(src_port, dst_port, port) {
		int link_speed, link_width, up_bw, down_bw;

		if (!tb_port_is_null(port))
			continue;

		if (tb_is_upstream_port(port)) {
			link_speed = port->sw->link_speed;
		} else {
			link_speed = tb_port_get_link_speed(port);
			if (link_speed < 0)
				return link_speed;
		}

		link_width = port->bonded ? 2 : 1;

		up_bw = link_speed * link_width * 1000; /* Mb/s */
		/* Leave 10% guard band */
		up_bw -= up_bw / 10;
		down_bw = up_bw;

		tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);

		/*
		 * Find all DP tunnels that cross the port and reduce
		 * their consumed bandwidth from the available.
		 */
		list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
			int dp_consumed_up, dp_consumed_down;

			if (!tb_tunnel_is_dp(tunnel))
				continue;

			if (!tb_tunnel_port_on_path(tunnel, port))
				continue;

			ret = tb_tunnel_consumed_bandwidth(tunnel,
							   &dp_consumed_up,
							   &dp_consumed_down);
			if (ret)
				return ret;

			up_bw -= dp_consumed_up;
			down_bw -= dp_consumed_down;
		}

		/*
		 * If USB3 is tunneled from the host router down to the
		 * branch leading to this port, we need to take the USB3
		 * consumed bandwidth into account regardless of whether
		 * it actually crosses the port.
		 */
		up_bw -= usb3_consumed_up;
		down_bw -= usb3_consumed_down;

		if (up_bw < *available_up)
			*available_up = up_bw;
		if (down_bw < *available_down)
			*available_down = down_bw;
	}

	if (*available_up < 0)
		*available_up = 0;
	if (*available_down < 0)
		*available_down = 0;

	return 0;
}

static int tb_release_unused_usb3_bandwidth(struct tb *tb,
					    struct tb_port *src_port,
					    struct tb_port *dst_port)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}

static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
				      struct tb_port *dst_port)
{
	int ret, available_up, available_down;
	struct tb_tunnel *tunnel;

	tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
	if (!tunnel)
		return;

	tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");

	/*
	 * Calculate available bandwidth for the first hop USB3 tunnel.
	 * That determines the whole USB3 bandwidth for this branch.
	 */
	ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
				     &available_up, &available_down);
	if (ret) {
		tb_warn(tb, "failed to calculate available bandwidth\n");
		return;
	}

	tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
	       available_up, available_down);

	tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
					      &available_down);
}

static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret, available_up, available_down;
	struct tb_port *up, *down, *port;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;

	up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
	if (!up)
		return 0;

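	/*
	 * USB 3.x can only be tunneled over a USB4 link; plain
	 * Thunderbolt links do not carry USB3 tunnels.
	 */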
	if (!sw->link_usb4)
		return 0;

	/*
	 * Look up available down port. Since we are chaining it should
	 * be found right above this switch.
	 */
	port = tb_port_at(tb_route(sw), parent);
	down = tb_find_usb3_down(parent, port);
	if (!down)
		return 0;

	if (tb_route(parent)) {
		struct tb_port *parent_up;
		/*
		 * Check first that the parent switch has its upstream USB3
		 * port enabled. Otherwise the chain is not complete and
		 * there is no point setting up a new tunnel.
		 */
		parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
		if (!parent_up || !tb_port_is_enabled(parent_up))
			return 0;

		/* Make all unused bandwidth available for the new tunnel */
		ret = tb_release_unused_usb3_bandwidth(tb, down, up);
		if (ret)
			return ret;
	}

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		goto err_reclaim;

	tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
		    available_up, available_down);

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel) {
		ret = -ENOMEM;
		goto err_reclaim;
	}

	if (tb_tunnel_activate(tunnel)) {
		tb_port_info(up,
			     "USB3 tunnel activation failed, aborting\n");
		ret = -EIO;
		goto err_free;
	}

	list_add_tail(&tunnel->list, &tcm->tunnel_list);
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return 0;

err_free:
	tb_tunnel_free(tunnel);
err_reclaim:
	if (tb_route(parent))
		tb_reclaim_usb3_bandwidth(tb, down, up);

	return ret;
}

static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_route(sw)) {
		ret = tb_tunnel_usb3(sw->tb, sw);
		if (ret)
			return ret;
	}

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;
		ret = tb_create_usb3_tunnels(port->remote->sw);
		if (ret)
			return ret;
	}

	return 0;
}

static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 * @sw: Switch to scan
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	struct tb_port *port;

	pm_runtime_get_sync(&sw->dev);

	tb_switch_for_each_port(sw, port)
		tb_scan_port(port);

	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 * @port: Port to scan
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_cm *tcm = tb_priv(port->sw->tb);
	struct tb_port *upstream_port;
	struct tb_switch *sw;

	if (tb_is_upstream_port(port))
		return;

	if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
	    !tb_dp_port_is_enabled(port)) {
		tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
		tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
				 false);
		return;
	}

	if (port->config.type != TB_TYPE_PORT)
		return;
	if (port->dual_link_port && port->link_nr)
		return; /*
			 * Downstream switch is reachable through two ports.
			 * Only scan on the primary port (link_nr == 0).
			 */
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_dbg(port, "port already has a remote\n");
		return;
	}

	tb_retimer_scan(port);

	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (IS_ERR(sw)) {
		/*
		 * If there is an error accessing the connected switch
		 * it may be connected to another domain. We also allow
		 * the other domain to be connected to a switch at the
		 * maximum depth.
		 */
		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
			tb_scan_xdomain(port);
		return;
	}

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	/*
	 * If there was previously another domain connected remove it
	 * first.
	 */
	if (port->xdomain) {
		tb_xdomain_remove(port->xdomain);
		tb_port_unconfigure_xdomain(port);
		port->xdomain = NULL;
	}

	/*
	 * Do not send uevents until we have discovered all existing
	 * tunnels and know which switches were authorized already by
	 * the boot firmware.
	 */
	if (!tcm->hotplug_active)
		dev_set_uevent_suppress(&sw->dev, true);

	/*
	 * At the moment runtime PM is supported only on Thunderbolt 2
	 * and beyond (devices that have a link controller).
	 */
	sw->rpm = sw->generation > 1;

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	/* Link the switches using both links if available */
	upstream_port = tb_upstream_port(sw);
	port->remote = upstream_port;
	upstream_port->remote = port;
	if (port->dual_link_port && upstream_port->dual_link_port) {
		port->dual_link_port->remote = upstream_port->dual_link_port;
		upstream_port->dual_link_port->remote = port->dual_link_port;
	}

	/* Enable lane bonding if supported */
	tb_switch_lane_bonding_enable(sw);
	/* Set the link configured */
	tb_switch_configure_link(sw);

	if (tb_enable_tmu(sw))
		tb_sw_warn(sw, "failed to enable TMU\n");

	/* Scan upstream retimers */
	tb_retimer_scan(upstream_port);

	/*
	 * Create USB 3.x tunnels only when the switch is plugged to the
	 * domain. This is because we scan the domain also during
	 * discovery and want to discover the existing USB 3.x tunnels
	 * before we create any new ones.
	 */
	if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
		tb_sw_warn(sw, "USB3 tunnel creation failed\n");

	tb_add_dp_resources(sw);
	tb_scan_switch(sw);
}

static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
	struct tb_port *src_port, *dst_port;
	struct tb *tb;

	if (!tunnel)
		return;

	tb_tunnel_deactivate(tunnel);
	list_del(&tunnel->list);

	tb = tunnel->tb;
	src_port = tunnel->src_port;
	dst_port = tunnel->dst_port;

	switch (tunnel->type) {
	case TB_TUNNEL_DP:
		/*
		 * In case of DP tunnel make sure the DP IN resource is
		 * deallocated properly.
		 */
		tb_switch_dealloc_dp_resource(src_port->sw, src_port);
		/* Now we can allow the domain to runtime suspend again */
		pm_runtime_mark_last_busy(&dst_port->sw->dev);
		pm_runtime_put_autosuspend(&dst_port->sw->dev);
		pm_runtime_mark_last_busy(&src_port->sw->dev);
		pm_runtime_put_autosuspend(&src_port->sw->dev);
		fallthrough;

	case TB_TUNNEL_USB3:
		tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
		break;

	default:
		/*
		 * PCIe and DMA tunnels do not consume guaranteed
		 * bandwidth.
		 */
		break;
	}

	tb_tunnel_free(tunnel);
}

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 * @tb: Domain whose invalid tunnels to free
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_tunnel *tunnel;
	struct tb_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_tunnel_is_invalid(tunnel))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 * @sw: Switch whose hierarchy to traverse
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_has_remote(port))
			continue;

		if (port->remote->sw->is_unplugged) {
			tb_retimer_remove_all(port);
			tb_remove_dp_resources(port->remote->sw);
			tb_switch_unconfigure_link(port->remote->sw);
			tb_switch_lane_bonding_disable(port->remote->sw);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
			if (port->dual_link_port)
				port->dual_link_port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}

static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
					 const struct tb_port *port)
{
	struct tb_port *down = NULL;

	/*
	 * To keep plugging devices consistently in the same PCIe
	 * hierarchy, do mapping here for switch downstream PCIe ports.
	 */
	if (tb_switch_is_usb4(sw)) {
		down = usb4_switch_map_pcie_down(sw, port);
	} else if (!tb_route(sw)) {
		int phy_port = tb_phy_port_from_link(port->port);
		int index;

		/*
		 * Hard-coded Thunderbolt port to PCIe down port mapping
		 * per controller.
		 */
		if (tb_switch_is_cactus_ridge(sw) ||
		    tb_switch_is_alpine_ridge(sw))
			index = !phy_port ? 6 : 7;
		else if (tb_switch_is_falcon_ridge(sw))
			index = !phy_port ? 6 : 8;
		else if (tb_switch_is_titan_ridge(sw))
			index = !phy_port ? 8 : 9;
		else
			goto out;

		/* Validate the hard-coding */
		if (WARN_ON(index > sw->config.max_port_number))
			goto out;

		down = &sw->ports[index];
	}

	if (down) {
		if (WARN_ON(!tb_port_is_pcie_down(down)))
			goto out;
		if (tb_pci_port_is_enabled(down))
			goto out;

		return down;
	}

out:
	return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}

static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
	struct tb_port *host_port, *port;
	struct tb_cm *tcm = tb_priv(tb);

	host_port = tb_route(in->sw) ?
		tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		if (!tb_port_is_dpout(port))
			continue;

		if (tb_port_is_enabled(port)) {
			tb_port_dbg(port, "in use\n");
			continue;
		}

		tb_port_dbg(port, "DP OUT available\n");

		/*
		 * Keep the DP tunnel under the topology starting from
		 * the same host router downstream port.
		 */
		if (host_port && tb_route(port->sw)) {
			struct tb_port *p;

			p = tb_port_at(tb_route(port->sw), tb->root_switch);
			if (p != host_port)
				continue;
		}

		return port;
	}

	return NULL;
}

static void tb_tunnel_dp(struct tb *tb)
{
	int available_up, available_down, ret;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *port, *in, *out;
	struct tb_tunnel *tunnel;

	/*
	 * Find a pair of inactive DP IN and DP OUT adapters and then
	 * establish a DP tunnel between them.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) in = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) out = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) list_for_each_entry(port, &tcm->dp_resources, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) if (!tb_port_is_dpin(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (tb_port_is_enabled(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) tb_port_dbg(port, "in use\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) tb_port_dbg(port, "DP IN available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) out = tb_find_dp_out(tb, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) in = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (!in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (!out) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
	/*
	 * The DP stream needs the domain to be active so runtime
	 * resume both ends of the tunnel.
	 *
	 * This should also bring the routers in the middle active and
	 * keep the domain from runtime suspending while the DP tunnel
	 * is active.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) pm_runtime_get_sync(&in->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) pm_runtime_get_sync(&out->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (tb_switch_alloc_dp_resource(in->sw, in)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) goto err_rpm_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* Make all unused USB3 bandwidth available for the new DP tunnel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ret = tb_release_unused_usb3_bandwidth(tb, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) tb_warn(tb, "failed to release unused bandwidth\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto err_dealloc_dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ret = tb_available_bandwidth(tb, in, out, &available_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) &available_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) goto err_reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) available_up, available_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) if (!tunnel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) tb_port_dbg(out, "could not allocate DP tunnel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) goto err_reclaim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (tb_tunnel_activate(tunnel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) tb_port_info(out, "DP tunnel activation failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) list_add_tail(&tunnel->list, &tcm->tunnel_list);
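	/* Let USB3 reclaim whatever the new DP tunnel did not consume */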
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) tb_reclaim_usb3_bandwidth(tb, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
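/*
 * Unwind in reverse order of setup: free the tunnel, let USB3 take the
 * released bandwidth back, drop the DP IN resource and finally release
 * the runtime PM references taken above.
 */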
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) err_reclaim:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) tb_reclaim_usb3_bandwidth(tb, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) err_dealloc_dp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) tb_switch_dealloc_dp_resource(in->sw, in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) err_rpm_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) pm_runtime_mark_last_busy(&out->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) pm_runtime_put_autosuspend(&out->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) pm_runtime_mark_last_busy(&in->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) pm_runtime_put_autosuspend(&in->sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct tb_port *in, *out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (tb_port_is_dpin(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) tb_port_dbg(port, "DP IN resource unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) in = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) out = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) tb_port_dbg(port, "DP OUT resource unavailable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) in = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) out = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
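	/*
	 * One of @in/@out is NULL here, which tb_find_tunnel() treats
	 * as "match any", so this finds the DP tunnel that uses @port
	 * at either end.
	 */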
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) tb_deactivate_and_free_tunnel(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) list_del_init(&port->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
	/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) tb_tunnel_dp(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct tb_port *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (tb_port_is_enabled(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) list_for_each_entry(p, &tcm->dp_resources, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (p == port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) tb_port_dbg(port, "DP %s resource available\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) tb_port_is_dpin(port) ? "IN" : "OUT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) list_add_tail(&port->list, &tcm->dp_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Look for suitable DP IN <-> DP OUT pairs now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) tb_tunnel_dp(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) static void tb_disconnect_and_release_dp(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) struct tb_tunnel *tunnel, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * Tear down all DP tunnels and release their resources. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * will be re-established after resume based on plug events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (tb_tunnel_is_dp(tunnel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) tb_deactivate_and_free_tunnel(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) while (!list_empty(&tcm->dp_resources)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) port = list_first_entry(&tcm->dp_resources,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct tb_port, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) list_del_init(&port->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
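/*
 * Wired up as the ->approve_switch() callback in tb_cm_ops below, so
 * this runs when userspace authorizes @sw (security level "user").
 */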
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct tb_port *up, *down, *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct tb_switch *parent_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
	/*
	 * Look up an available down port. Since we are chaining, it
	 * should be found right above this switch.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) parent_sw = tb_to_switch(sw->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) port = tb_port_at(tb_route(sw), parent_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) down = tb_find_pcie_down(parent_sw, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (!down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) tunnel = tb_tunnel_alloc_pci(tb, up, down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) if (!tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (tb_tunnel_activate(tunnel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) tb_port_info(up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) "PCIe tunnel activation failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) list_add_tail(&tunnel->list, &tcm->tunnel_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct tb_port *nhi_port, *dst_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) struct tb_switch *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sw = tb_to_switch(xd->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dst_port = tb_port_at(xd->route, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) xd->transmit_path, xd->receive_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) xd->receive_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (!tunnel) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (tb_tunnel_activate(tunnel)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) tb_port_info(nhi_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) "DMA tunnel activation failed, aborting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) list_add_tail(&tunnel->list, &tcm->tunnel_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct tb_port *dst_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct tb_switch *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) sw = tb_to_switch(xd->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dst_port = tb_port_at(xd->route, sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) tb_deactivate_and_free_tunnel(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
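	/*
	 * If the XDomain is already gone, its paths were torn down in
	 * tb_handle_hotplug(), so only do the work for a still
	 * plugged-in remote.
	 */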
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (!xd->is_unplugged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) __tb_disconnect_xdomain_paths(tb, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) /* hotplug handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
/**
 * tb_handle_hotplug() - handle hotplug event
 * @work: Work containing the hotplug event (&struct tb_hotplug_event)
 *
 * Executes on tb->wq.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) static void tb_handle_hotplug(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct tb *tb = ev->tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct tb_switch *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Bring the domain back from sleep if it was suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) pm_runtime_get_sync(&tb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (!tcm->hotplug_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) goto out; /* during init, suspend or shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) sw = tb_switch_find_by_route(tb, ev->route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) goto put_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) port = &sw->ports[ev->port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (tb_is_upstream_port(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ev->route, ev->port, ev->unplug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) goto put_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) pm_runtime_get_sync(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (ev->unplug) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) tb_retimer_remove_all(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (tb_port_has_remote(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) tb_port_dbg(port, "switch unplugged\n");
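			/*
			 * Tear down in dependency order: mark the whole
			 * subtree unplugged first so nothing touches
			 * the vanished hardware, then free the tunnels
			 * and DP resources that ran through it, and
			 * only then undo the link configuration and
			 * remove the switch itself.
			 */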
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) tb_sw_set_unplugged(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) tb_free_invalid_tunnels(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) tb_remove_dp_resources(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) tb_switch_tmu_disable(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) tb_switch_unconfigure_link(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tb_switch_lane_bonding_disable(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) tb_switch_remove(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) port->remote = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (port->dual_link_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) port->dual_link_port->remote = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Maybe we can create another DP tunnel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) tb_tunnel_dp(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) } else if (port->xdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) tb_port_dbg(port, "xdomain unplugged\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Service drivers are unbound during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * tb_xdomain_remove() so setting XDomain as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * unplugged here prevents deadlock if they call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) * tb_xdomain_disable_paths(). We will tear down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) * the path below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) xd->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) tb_xdomain_remove(xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) port->xdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) __tb_disconnect_xdomain_paths(tb, xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) tb_xdomain_put(xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) tb_port_unconfigure_xdomain(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) tb_dp_resource_unavailable(tb, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) tb_port_dbg(port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) "got unplug event for disconnected port, ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) } else if (port->remote) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) tb_port_dbg(port, "got plug event for connected port, ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (tb_port_is_null(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) tb_port_dbg(port, "hotplug: scanning\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) tb_scan_port(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!port->remote)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) tb_port_dbg(port, "hotplug: no switch found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) tb_dp_resource_available(tb, port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) pm_runtime_mark_last_busy(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) pm_runtime_put_autosuspend(&sw->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) put_sw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) tb_switch_put(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) pm_runtime_mark_last_busy(&tb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) pm_runtime_put_autosuspend(&tb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) kfree(ev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
/**
 * tb_handle_event() - callback function for the control channel
 * @tb: Pointer to the domain
 * @type: Packet type of the event
 * @buf: Event packet payload
 * @size: Size of @buf in bytes
 *
 * Delegates to tb_handle_hotplug.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) const void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) const struct cfg_event_pkg *pkg = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) u64 route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (type != TB_CFG_PKG_EVENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) tb_warn(tb, "unexpected event %#x, ignoring\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) route = tb_cfg_get_route(&pkg->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) pkg->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) static void tb_stop(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct tb_tunnel *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) cancel_delayed_work(&tcm->remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* tunnels are only present after everything has been initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * DMA tunnels require the driver to be functional so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * tear them down. Other protocol tunnels can be left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * intact.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (tb_tunnel_is_dma(tunnel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) tb_tunnel_deactivate(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) tb_switch_remove(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int tb_scan_finalize_switch(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (tb_is_switch(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
		/*
		 * If we found that the switch was already set up by the
		 * boot firmware, mark it as authorized now, before we
		 * send the uevent to userspace.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (sw->boot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) sw->authorized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) dev_set_uevent_suppress(dev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) kobject_uevent(&dev->kobj, KOBJ_ADD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) device_for_each_child(dev, NULL, tb_scan_finalize_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) static int tb_start(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (IS_ERR(tb->root_switch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return PTR_ERR(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
	/*
	 * ICM firmware upgrade needs running firmware, and in native
	 * mode that is not available, so disable firmware upgrade of
	 * the root switch.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) tb->root_switch->no_nvm_upgrade = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* All USB4 routers support runtime PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) ret = tb_switch_configure(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) tb_switch_put(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /* Announce the switch to the world */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) ret = tb_switch_add(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) tb_switch_put(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* Enable TMU if it is off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) tb_switch_tmu_enable(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* Full scan to discover devices added before the driver was loaded. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) tb_scan_switch(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /* Find out tunnels created by the boot firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) tb_discover_tunnels(tb->root_switch);
	/*
	 * If the boot firmware did not create USB 3.x tunnels, create
	 * them now for the whole topology.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) tb_create_usb3_tunnels(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* Add DP IN resources for the root switch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) tb_add_dp_resources(tb->root_switch);
	/* Make the discovered switches available to userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) device_for_each_child(&tb->root_switch->dev, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) tb_scan_finalize_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* Allow tb_handle_hotplug to progress events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) tcm->hotplug_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static int tb_suspend_noirq(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) tb_dbg(tb, "suspending...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) tb_disconnect_and_release_dp(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) tb_switch_suspend(tb->root_switch, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) tb_dbg(tb, "suspend finished\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) static void tb_restore_children(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) /* No need to restore if the router is already unplugged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (sw->is_unplugged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (tb_enable_tmu(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) tb_sw_warn(sw, "failed to restore TMU configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (!tb_port_has_remote(port) && !port->xdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (port->remote) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) tb_switch_lane_bonding_enable(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) tb_switch_configure_link(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) tb_restore_children(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) } else if (port->xdomain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) tb_port_configure_xdomain(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static int tb_resume_noirq(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct tb_tunnel *tunnel, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) tb_dbg(tb, "resuming...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
	/* Remove any PCIe devices the firmware might have set up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) tb_switch_reset(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) tb_switch_resume(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) tb_free_invalid_tunnels(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) tb_free_unplugged_children(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) tb_restore_children(tb->root_switch);
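	/* Re-activate the tunnels that survived across the suspend */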
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) tb_tunnel_restart(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to come back up after
		 * resume. Empirically, 100ms has been enough.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) /* Allow tb_handle_hotplug to progress events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) tcm->hotplug_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) tb_dbg(tb, "resume finished\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int tb_free_unplugged_xdomains(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (tb_is_upstream_port(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (port->xdomain && port->xdomain->is_unplugged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) tb_retimer_remove_all(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) tb_xdomain_remove(port->xdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) tb_port_unconfigure_xdomain(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) port->xdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) } else if (port->remote) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ret += tb_free_unplugged_xdomains(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
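/*
 * For hibernation it is enough to stop handling hotplug events while
 * the image is written out; the hardware keeps its state, so no
 * tunnels are torn down here and tb_thaw_noirq() simply re-enables
 * event handling.
 */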
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) static int tb_freeze_noirq(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) tcm->hotplug_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) static int tb_thaw_noirq(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) tcm->hotplug_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static void tb_complete(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
	/*
	 * Release any unplugged XDomains. If another domain was
	 * swapped in place of an unplugged XDomain while we were
	 * suspended, we need to run a rescan.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (tb_free_unplugged_xdomains(tb->root_switch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) tb_scan_switch(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) static int tb_runtime_suspend(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) tb_switch_suspend(tb->root_switch, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) tcm->hotplug_active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static void tb_remove_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct tb *tb = tcm_to_tb(tcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (tb->root_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) tb_free_unplugged_children(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) tb_free_unplugged_xdomains(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static int tb_runtime_resume(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct tb_cm *tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct tb_tunnel *tunnel, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) tb_switch_resume(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) tb_free_invalid_tunnels(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) tb_restore_children(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) tb_tunnel_restart(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) tcm->hotplug_active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) * Schedule cleanup of any unplugged devices. Run this in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * separate thread to avoid possible deadlock if the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * removal runtime resumes the unplugged device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) static const struct tb_cm_ops tb_cm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) .start = tb_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) .stop = tb_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) .suspend_noirq = tb_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) .resume_noirq = tb_resume_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) .freeze_noirq = tb_freeze_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) .thaw_noirq = tb_thaw_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) .complete = tb_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) .runtime_suspend = tb_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) .runtime_resume = tb_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) .handle_event = tb_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) .approve_switch = tb_tunnel_pci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) .approve_xdomain_paths = tb_approve_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct tb *tb_probe(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct tb_cm *tcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct tb *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) tb = tb_domain_alloc(nhi, sizeof(*tcm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (!tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) tb->security_level = TB_SECURITY_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) tb->cm_ops = &tb_cm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) tcm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) INIT_LIST_HEAD(&tcm->tunnel_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) INIT_LIST_HEAD(&tcm->dp_resources);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) return tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
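
/*
 * Usage sketch (an assumption about the caller, not part of this
 * file): the NHI driver first offers the domain to the firmware
 * connection manager and falls back to this one, roughly:
 *
 *	tb = icm_probe(nhi);
 *	if (!tb)
 *		tb = tb_probe(nhi);
 *	if (!tb)
 *		return -ENODEV;
 *	ret = tb_domain_add(tb);
 */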