// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID 8

#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID 8

#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
#define TB_DP_VIDEO_HOPID 9

#define TB_DP_VIDEO_PATH_OUT 0
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2

#define TB_DMA_PATH_OUT 0
#define TB_DMA_PATH_IN 1

static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
	do { \
		struct tb_tunnel *__tunnel = (tunnel); \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
		      tb_route(__tunnel->src_port->sw), \
		      __tunnel->src_port->port, \
		      tb_route(__tunnel->dst_port->sw), \
		      __tunnel->dst_port->port, \
		      tb_tunnel_names[__tunnel->type], \
		      ## arg); \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
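
/*
 * Example (illustrative, not from the original file): for a PCIe
 * tunnel whose source is port 3 on the host router (route 0) and
 * whose destination is port 5 on the router at route 0x301,
 *
 *	tb_tunnel_dbg(tunnel, "activating\n");
 *
 * logs a line along the lines of "0:3 <-> 301:5 (PCI): activating",
 * with the exact prefix depending on how tb_dbg() is defined in tb.h.
 */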

static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, @sw is not NULL */
	if (sw) {
		/* More credits for a faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}
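
/*
 * Worked example (assuming link_speed is in Gb/s per lane, as
 * elsewhere in this driver): a 20 Gb/s link with two bonded lanes
 * gives 20 * 2 == 40, so hops on it start with 32 credits; a single
 * 20 Gb/s lane gives 20 -> 24 credits, and any other combination or
 * an incomplete path (sw == NULL) falls back to 16.
 */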

static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	if (path->path_length > 1)
		path->hops[1].initial_credits =
			tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, &port,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	/* The discovered reverse path must lead back to @down */
	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a PCIe tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCIe tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}
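
/*
 * A minimal usage sketch (assumptions: "up" and "down" are valid PCIe
 * adapter ports and the caller is connection manager code like tb.c;
 * tb_tunnel_activate() and tb_tunnel_free() are declared in tunnel.h):
 *
 *	struct tb_tunnel *tunnel;
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_free(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tunnel_list);
 *
 * where tunnel_list is whatever list the caller uses to track active
 * tunnels.
 */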

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}
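
/*
 * Back-of-the-envelope: the loop above polls at most 11 times with a
 * 10-100 us sleep between reads, so the other end gets on the order of
 * a millisecond to clear DP_STATUS_CTRL_CMHS before -ETIMEDOUT.
 */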

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 Mb/s\n", rate);
		fallthrough;
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}
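
/*
 * Illustration: tb_dp_cap_set_rate(val, 5400) encodes HBR2 into the
 * rate field, so tb_dp_cap_get_rate() of the result yields 5400 again.
 * An unsupported value such as 3240 trips the WARN() above and is
 * clamped to RBR (1620 Mb/s).
 */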

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		fallthrough;
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}
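
/*
 * For example, HBR3 x4: 8100 Mb/s * 4 lanes * 8/10 = 25920 Mb/s of
 * usable bandwidth, which matches the first entry of the dp_bw[]
 * table below.
 */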

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}
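
/*
 * Worked example: with max_bw = 16000 Mb/s, a DP IN capable of
 * 8100 Mb/s x4 and a DP OUT capable of 5400 Mb/s x4, the scan skips
 * { 8100, 4 } and { 8100, 2 } (too fast for the OUT side), rejects
 * { 5400, 4 } (17280 Mb/s > 16000), and settles on { 2700, 4 } for
 * 8640 Mb/s. The table is ordered by descending bandwidth, so the
 * first entry that passes all three checks is the best fit.
 */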

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret, max_bw;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (in->sw->config.depth < out->sw->config.depth)
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;

	if (max_bw && bw > max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				    int *consumed_down)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 20;

		/*
		 * Wait for DPRX done. Normally it should already be set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		/*
		 * The post-decrement above leaves timeout at -1 when
		 * the loop runs out, so check for that rather than 0
		 * (0 can still be a successful break on the last try).
		 */
		if (timeout < 0)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		*consumed_up = 0;
		*consumed_down = 0;
		return 0;
	}

	if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
		*consumed_up = 0;
		*consumed_down = tb_dp_bandwidth(rate, lanes);
	} else {
		*consumed_up = tb_dp_bandwidth(rate, lanes);
		*consumed_down = 0;
	}

	return 0;
}
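
/*
 * Directionality, concretely: if the DP IN adapter sits on the host
 * router (depth 0) and the DP OUT on a device router deeper in the
 * chain, the video traffic flows away from the host, so the whole
 * budget is reported as consumed_down; in the opposite topology it is
 * reported as consumed_up.
 */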

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
 *	    if not limited)
 * @max_down: Maximum available downstream bandwidth for the DP tunnel
 *	      (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_up,
				     int max_down)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_up = max_up;
	tunnel->max_down = max_down;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}
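
/*
 * E.g. an NHI adapter advertising 16 total buffers yields
 * min(16, 13) == 13 credits per hop, while one advertising only 8
 * yields all 8; the 13-credit cap bounds how much buffer space a DMA
 * path can claim.
 */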

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * tb_tunnel_alloc_dma() - allocate a DMA tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * @tb: Pointer to the domain structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * @nhi: Host controller port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * @dst: Destination null port which the other domain is connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * @transmit_ring: NHI ring number used to send packets towards the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * other domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * @transmit_path: HopID used for transmitting packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * @receive_ring: NHI ring number used to receive packets from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * other domain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * @receive_path: HopID used for receiving packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * Return: Returns a tb_tunnel on success or %NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct tb_port *dst, int transmit_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) int transmit_path, int receive_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int receive_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct tb_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) u32 credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (!tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) tunnel->activate = tb_dma_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) tunnel->src_port = nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) tunnel->dst_port = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) credits = tb_dma_credits(nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) tunnel->paths[TB_DMA_PATH_IN] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) tunnel->paths[TB_DMA_PATH_OUT] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
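/*
 * Illustrative usage sketch (not compiled): setting up and activating
 * a DMA tunnel towards another domain. The ring number and HopIDs are
 * made-up example values that the XDomain connection manager would
 * normally negotiate with the remote host.
 */
#if 0
static struct tb_tunnel *example_xdomain_tunnel(struct tb *tb,
						struct tb_port *nhi,
						struct tb_port *dst)
{
	struct tb_tunnel *tunnel;

	/* Transmit ring 1/HopID 8 and receive ring 1/HopID 8 */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 1, 8, 1, 8);
	if (!tunnel)
		return NULL;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	return tunnel;
}
#endif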
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int ret, up_max_rate, down_max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ret = usb4_usb3_port_max_link_rate(up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) up_max_rate = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) ret = usb4_usb3_port_max_link_rate(down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) down_max_rate = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return min(up_max_rate, down_max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int tb_usb3_init(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) tunnel->allocated_up, tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) &tunnel->allocated_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) &tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) res = tb_usb3_port_enable(tunnel->src_port, activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (tb_port_is_usb3_up(tunnel->dst_port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) return tb_usb3_port_enable(tunnel->dst_port, activate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) int *consumed_up, int *consumed_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * PCIe tunneling affects the USB3 bandwidth so take that into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * account here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) *consumed_up = tunnel->allocated_up * (3 + 1) / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) *consumed_down = tunnel->allocated_down * (3 + 1) / 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
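/*
 * Worked example of the calculation above: with 900/900 Mb/s allocated,
 * the reported consumption is 900 * (3 + 1) / 3 = 1200 Mb/s in each
 * direction, i.e. a third extra is reserved for PCIe traffic sharing
 * the link.
 */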
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) &tunnel->allocated_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) &tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) tunnel->allocated_up, tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int *available_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int *available_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) int ret, max_rate, allocate_up, allocate_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 		/* Use the maximum link rate if the link valid bit is not set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * 90% of the max rate can be allocated for isochronous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) max_rate = ret * 90 / 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) /* No need to reclaim if already at maximum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (tunnel->allocated_up >= max_rate &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) tunnel->allocated_down >= max_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Don't go lower than what is already allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) allocate_up = min(max_rate, *available_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (allocate_up < tunnel->allocated_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) allocate_up = tunnel->allocated_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) allocate_down = min(max_rate, *available_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (allocate_down < tunnel->allocated_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) allocate_down = tunnel->allocated_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* If no changes no need to do more */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (allocate_up == tunnel->allocated_up &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) allocate_down == tunnel->allocated_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) &allocate_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) tunnel->allocated_up = allocate_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *available_up -= tunnel->allocated_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) tunnel->allocated_down = allocate_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) *available_down -= tunnel->allocated_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) tunnel->allocated_up, tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
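/*
 * Worked example of the reclaim above (made-up numbers): on a 10 Gb/s
 * link max_rate = 10000 * 90 / 100 = 9000 Mb/s. With 3000 Mb/s
 * currently allocated and 5000 Mb/s available in both directions the
 * tunnel requests min(9000, 5000) = 5000 Mb/s, and on success the
 * caller's available bandwidth drops by that amount.
 */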
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static void tb_usb3_init_path(struct tb_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) path->egress_shared_buffer = TB_PATH_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) path->ingress_fc_enable = TB_PATH_ALL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) path->ingress_shared_buffer = TB_PATH_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) path->priority = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) path->weight = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) path->drop_packages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) path->nfc_credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) path->hops[0].initial_credits = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (path->path_length > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) path->hops[1].initial_credits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) tb_initial_credits(path->hops[1].in_port->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * @tb: Pointer to the domain structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * @down: USB3 downstream adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * If @down adapter is active, follows the tunnel to the USB3 upstream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * adapter and back. Returns the discovered tunnel or %NULL if there was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * no tunnel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct tb_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) if (!tb_usb3_port_is_enabled(down))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) tunnel->activate = tb_usb3_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) tunnel->src_port = down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Discover both paths even if they are not complete. We will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * clean them up by calling tb_tunnel_deactivate() below in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) &tunnel->dst_port, "USB3 Down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Just disable the downstream port */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) tb_usb3_port_enable(down, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) tunnel->paths[TB_USB3_PATH_DOWN] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) "USB3 Up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) goto err_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) tunnel->paths[TB_USB3_PATH_UP] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /* Validate that the tunnel is complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!tb_port_is_usb3_up(tunnel->dst_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) tb_port_warn(tunnel->dst_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			     "path does not end on a USB3 adapter, cleaning up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) goto err_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (down != tunnel->src_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) goto err_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) tb_tunnel_warn(tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) "tunnel is not fully activated, cleaning up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) goto err_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!tb_route(down->sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Read the initial bandwidth allocation for the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * hop tunnel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ret = usb4_usb3_port_allocated_bandwidth(down,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) &tunnel->allocated_up, &tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto err_deactivate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) tunnel->allocated_up, tunnel->allocated_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) tunnel->init = tb_usb3_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) tunnel->release_unused_bandwidth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) tb_usb3_release_unused_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) tunnel->reclaim_available_bandwidth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) tb_usb3_reclaim_available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) tb_tunnel_dbg(tunnel, "discovered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) err_deactivate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) tb_tunnel_deactivate(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * @tb: Pointer to the domain structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * @up: USB3 upstream adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * @down: USB3 downstream adapter port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * if not limited).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * (%0 if not limited).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * %TB_TYPE_USB3_DOWN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * Return: Returns a tb_tunnel on success or %NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct tb_port *down, int max_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) int max_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct tb_tunnel *tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct tb_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int max_rate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * Check that we have enough bandwidth available for the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * USB3 tunnel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (max_up > 0 || max_down > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) max_rate = tb_usb3_max_link_rate(down, up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (max_rate < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* Only 90% can be allocated for USB3 isochronous transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) max_rate = max_rate * 90 / 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) max_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (max_rate > max_up || max_rate > max_down) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) if (!tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) tunnel->activate = tb_usb3_activate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) tunnel->src_port = down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) tunnel->dst_port = up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) tunnel->max_up = max_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) tunnel->max_down = max_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) "USB3 Down");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) tb_usb3_init_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) tunnel->paths[TB_USB3_PATH_DOWN] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "USB3 Up");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) tb_tunnel_free(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) tb_usb3_init_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) tunnel->paths[TB_USB3_PATH_UP] = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (!tb_route(down->sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) tunnel->allocated_up = max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) tunnel->allocated_down = max_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) tunnel->init = tb_usb3_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) tunnel->release_unused_bandwidth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) tb_usb3_release_unused_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) tunnel->reclaim_available_bandwidth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) tb_usb3_reclaim_available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return tunnel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
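/*
 * Illustrative usage sketch (not compiled): creating and activating a
 * USB3 tunnel with no caller-imposed bandwidth limit. A real connection
 * manager would also keep the returned tunnel on its list of active
 * tunnels instead of dropping the pointer.
 */
#if 0
static int example_enable_usb3(struct tb *tb, struct tb_port *up,
			       struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	int ret;

	/* max_up/max_down of 0 means bandwidth is not limited */
	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 0, 0);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret)
		tb_tunnel_free(tunnel);

	return ret;
}
#endif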
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * tb_tunnel_free() - free a tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * @tunnel: Tunnel to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * Frees a tunnel. The tunnel does not need to be deactivated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) void tb_tunnel_free(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) if (!tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (tunnel->paths[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) tb_path_free(tunnel->paths[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) kfree(tunnel->paths);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) kfree(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * tb_tunnel_is_invalid() - check whether an activated tunnel is still valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * @tunnel: Tunnel to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) WARN_ON(!tunnel->paths[i]->activated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (tb_path_is_invalid(tunnel->paths[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * tb_tunnel_restart() - activate a tunnel after a hardware reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) * @tunnel: Tunnel to restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * Return: %0 on success and negative errno in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int tb_tunnel_restart(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int res, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) tb_tunnel_dbg(tunnel, "activating\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * Make sure all paths are properly disabled before enabling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * them again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (tunnel->paths[i]->activated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) tb_path_deactivate(tunnel->paths[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) tunnel->paths[i]->activated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (tunnel->init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) res = tunnel->init(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) res = tb_path_activate(tunnel->paths[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (tunnel->activate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) res = tunnel->activate(tunnel, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) tb_tunnel_warn(tunnel, "activation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) tb_tunnel_deactivate(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * tb_tunnel_activate() - activate a tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * @tunnel: Tunnel to activate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * Return: Returns %0 on success or an error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) int tb_tunnel_activate(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) if (tunnel->paths[i]->activated) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) tb_tunnel_WARN(tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) "trying to activate an already activated tunnel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return tb_tunnel_restart(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * tb_tunnel_deactivate() - deactivate a tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * @tunnel: Tunnel to deactivate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) tb_tunnel_dbg(tunnel, "deactivating\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (tunnel->activate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) tunnel->activate(tunnel, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (tunnel->paths[i] && tunnel->paths[i]->activated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) tb_path_deactivate(tunnel->paths[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * tb_tunnel_port_on_path() - Does the tunnel go through port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * @tunnel: Tunnel to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * @port: Port to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * Returns true if @tunnel goes through @port (direction does not matter),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) const struct tb_port *port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (!tunnel->paths[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (tb_path_port_on_path(tunnel->paths[i], port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) for (i = 0; i < tunnel->npaths; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (!tunnel->paths[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (!tunnel->paths[i]->activated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * @tunnel: Tunnel to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * Can be %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * Can be %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * Stores the amount of isochronous bandwidth @tunnel consumes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * @consumed_up and @consumed_down. In case of success returns %0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * negative errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int *consumed_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) int up_bw = 0, down_bw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!tb_tunnel_is_active(tunnel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (tunnel->consumed_bandwidth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) down_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (consumed_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) *consumed_up = up_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (consumed_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) *consumed_down = down_bw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
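/*
 * Illustrative sketch (not compiled) of summing consumption over a list
 * of tunnels, e.g. when a connection manager computes what is left for
 * a new tunnel. Keeping tunnels on a list through the tb_tunnel list
 * member is an assumption made for the example.
 */
#if 0
static void example_total_consumed(struct list_head *tunnel_list,
				   int *total_up, int *total_down)
{
	struct tb_tunnel *tunnel;
	int up, down;

	*total_up = *total_down = 0;
	list_for_each_entry(tunnel, tunnel_list, list) {
		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
			continue;
		*total_up += up;
		*total_down += down;
	}
}
#endif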
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * @tunnel: Tunnel whose unused bandwidth to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * the moment), this function makes it release all the unused bandwidth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Returns %0 in case of success and negative errno otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (!tb_tunnel_is_active(tunnel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (tunnel->release_unused_bandwidth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) ret = tunnel->release_unused_bandwidth(tunnel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * @tunnel: Tunnel reclaiming available bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * @available_up: Available upstream bandwidth (in Mb/s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * @available_down: Available downstream bandwidth (in Mb/s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * Reclaims bandwidth from @available_up and @available_down and updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * the variables accordingly (e.g. decreases both according to what was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * reclaimed by the tunnel). If nothing was reclaimed the values are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * kept as is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) int *available_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) int *available_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (!tb_tunnel_is_active(tunnel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (tunnel->reclaim_available_bandwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) tunnel->reclaim_available_bandwidth(tunnel, available_up,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) available_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }