// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is a firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"

#define PCIE2CIO_CMD			0x30
#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
#define PCIE2CIO_CMD_START		BIT(30)
#define PCIE2CIO_CMD_WRITE		BIT(21)
#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT		19
#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT		13

#define PCIE2CIO_WRDATA			0x34
#define PCIE2CIO_RDDATA			0x38
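
/*
 * The PCIe2CIO mailbox lives in a vendor specific capability of the
 * upstream PCIe port. An access is performed by writing the data word
 * (for writes), composing the command from the port number, config
 * space and dword index, and setting the START bit. The controller
 * clears START when the access completes and sets TIMEOUT if it failed
 * (see pcie2cio_read() and pcie2cio_write() below).
 */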

#define PHY_PORT_CS1			0x37
#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT	26

#define ICM_TIMEOUT			5000	/* ms */
#define ICM_APPROVE_TIMEOUT		10000	/* ms */
#define ICM_MAX_LINK			4

static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");

/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
 * @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
 *		   where ICM needs to be started manually
 * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
 *	     (only set when @upstream_port is not %NULL)
 * @safe_mode: ICM is in safe mode
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
 * @veto: Is RTD3 veto in effect
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
 * @get_mode: Read and return the ICM firmware mode (optional)
 * @get_route: Find a route string for given switch
 * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
 * @driver_ready: Send driver ready message to ICM
 * @set_uuid: Set UUID for the root switch (optional)
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device disconnected ICM message
 * @xdomain_connected: Handle XDomain connected ICM message
 * @xdomain_disconnected: Handle XDomain disconnected ICM message
 * @rtd3_veto: Handle RTD3 veto notification ICM message
 */
struct icm {
	struct mutex request_lock;
	struct delayed_work rescan_work;
	struct pci_dev *upstream_port;
	size_t max_boot_acl;
	int vnd_cap;
	bool safe_mode;
	bool rpm;
	bool can_upgrade_nvm;
	bool veto;
	bool (*is_supported)(struct tb *tb);
	int (*cio_reset)(struct tb *tb);
	int (*get_mode)(struct tb *tb);
	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
	void (*save_devices)(struct tb *tb);
	int (*driver_ready)(struct tb *tb,
			    enum tb_security_level *security_level,
			    size_t *nboot_acl, bool *rpm);
	void (*set_uuid)(struct tb *tb);
	void (*device_connected)(struct tb *tb,
				 const struct icm_pkg_header *hdr);
	void (*device_disconnected)(struct tb *tb,
				    const struct icm_pkg_header *hdr);
	void (*xdomain_connected)(struct tb *tb,
				  const struct icm_pkg_header *hdr);
	void (*xdomain_disconnected)(struct tb *tb,
				     const struct icm_pkg_header *hdr);
	void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};

struct icm_notification {
	struct work_struct work;
	struct icm_pkg_header *pkg;
	struct tb *tb;
};

struct ep_name_entry {
	u8 len;
	u8 type;
	u8 data[];
};

#define EP_NAME_INTEL_VSS	0x10

/* Intel Vendor specific structure */
struct intel_vss {
	u16 vendor;
	u16 model;
	u8 mc;
	u8 flags;
	u16 pci_devid;
	u32 nvm_version;
};

#define INTEL_VSS_FLAGS_RTD3	BIT(0)
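
/*
 * The ep_name blob in device connected notifications is a sequence of
 * variable length entries. @len in struct ep_name_entry covers the
 * whole entry including the two byte header, so a zero length
 * terminates the walk below.
 */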
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
	const void *end = ep_name + size;

	while (ep_name < end) {
		const struct ep_name_entry *ep = ep_name;

		if (!ep->len)
			break;
		if (ep_name + ep->len > end)
			break;

		if (ep->type == EP_NAME_INTEL_VSS)
			return (const struct intel_vss *)ep->data;

		ep_name += ep->len;
	}

	return NULL;
}

static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
{
	const struct intel_vss *vss;

	vss = parse_intel_vss(ep_name, size);
	if (vss)
		return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);

	return false;
}

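/*
 * The connection manager private data follows struct tb in the same
 * allocation (this is what tb_priv() hands out), so stepping back
 * sizeof(struct tb) bytes recovers the domain structure.
 */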
static inline struct tb *icm_to_tb(struct icm *icm)
{
	return ((void *)icm - sizeof(struct tb));
}

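/*
 * A route string stores one 8-bit port number per hop; the port used
 * at @depth is in byte @depth - 1 of the route.
 */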
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
	u8 link;

	link = depth ? route >> ((depth - 1) * 8) : route;
	return tb_phy_port_from_link(link);
}

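/*
 * Links are numbered from 1 and come in dual-link pairs (1, 2),
 * (3, 4) and so on. XORing the zero-based link number with 1 selects
 * the other link of the same pair.
 */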
static inline u8 dual_link_from_link(u8 link)
{
	return link ? ((link - 1) ^ 0x01) + 1 : 0;
}

static inline u64 get_route(u32 route_hi, u32 route_lo)
{
	return (u64)route_hi << 32 | route_lo;
}

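/*
 * The parent route is the same route string with the deepest hop byte
 * cleared (or 0 if we are already at the root switch).
 */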
static inline u64 get_parent_route(u64 route)
{
	int depth = tb_route_length(route);
	return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}

static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
	u32 cmd;

	do {
		pci_read_config_dword(icm->upstream_port,
				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
		if (!(cmd & PCIE2CIO_CMD_START)) {
			if (cmd & PCIE2CIO_CMD_TIMEOUT)
				break;
			return 0;
		}

		msleep(50);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
			 unsigned int port, unsigned int index, u32 *data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int ret, vnd_cap = icm->vnd_cap;
	u32 cmd;

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	ret = pci2cio_wait_completion(icm, 5000);
	if (ret)
		return ret;

	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
	return 0;
}

static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
			  unsigned int port, unsigned int index, u32 data)
{
	struct pci_dev *pdev = icm->upstream_port;
	int vnd_cap = icm->vnd_cap;
	u32 cmd;

	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);

	cmd = index;
	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);

	return pci2cio_wait_completion(icm, 5000);
}

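/*
 * ICM responses are matched to the pending request by frame type and
 * by the code field in the header. Multi-packet responses (such as the
 * topology dump) carry a packet_id/total_packets pair that icm_copy()
 * uses to place each packet in the right slot of the response buffer.
 */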
static bool icm_match(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *res_hdr = pkg->buffer;
	const struct icm_pkg_header *req_hdr = req->request;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (res_hdr->code != req_hdr->code)
		return false;

	return true;
}

static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	const struct icm_pkg_header *hdr = pkg->buffer;

	if (hdr->packet_id < req->npackets) {
		size_t offset = hdr->packet_id * req->response_size;

		memcpy(req->response + offset, pkg->buffer, req->response_size);
	}

	return hdr->packet_id == hdr->total_packets - 1;
}

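/*
 * Sends a request to the ICM and waits for the matching response.
 * Requests are serialized with @request_lock and retried a few times
 * if the firmware does not answer before the timeout expires.
 */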
static int icm_request(struct tb *tb, const void *request, size_t request_size,
		       void *response, size_t response_size, size_t npackets,
		       unsigned int timeout_msec)
{
	struct icm *icm = tb_priv(tb);
	int retries = 3;

	do {
		struct tb_cfg_request *req;
		struct tb_cfg_result res;

		req = tb_cfg_request_alloc();
		if (!req)
			return -ENOMEM;

		req->match = icm_match;
		req->copy = icm_copy;
		req->request = request;
		req->request_size = request_size;
		req->request_type = TB_CFG_PKG_ICM_CMD;
		req->response = response;
		req->npackets = npackets;
		req->response_size = response_size;
		req->response_type = TB_CFG_PKG_ICM_RESP;

		mutex_lock(&icm->request_lock);
		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
		mutex_unlock(&icm->request_lock);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			return res.err == 1 ? -EIO : res.err;

		usleep_range(20, 50);
	} while (retries--);

	return -ETIMEDOUT;
}

/*
 * If rescan is queued to run (we are resuming), postpone it to give the
 * firmware some more time to send device connected notifications for the
 * next devices in the chain.
 */
static void icm_postpone_rescan(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (delayed_work_pending(&icm->rescan_work))
		mod_delayed_work(tb->wq, &icm->rescan_work,
				 msecs_to_jiffies(500));
}

static void icm_veto_begin(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (!icm->veto) {
		icm->veto = true;
		/* Keep the domain powered while veto is in effect */
		pm_runtime_get(&tb->dev);
	}
}

static void icm_veto_end(struct tb *tb)
{
	struct icm *icm = tb_priv(tb);

	if (icm->veto) {
		icm->veto = false;
		/* Allow the domain suspend now */
		pm_runtime_mark_last_busy(&tb->dev);
		pm_runtime_put_autosuspend(&tb->dev);
	}
}

static bool icm_firmware_running(const struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_FW_STS);
	return !!(val & REG_FW_STS_ICM_EN);
}

static bool icm_fr_is_supported(struct tb *tb)
{
	return !x86_apple_machine;
}

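/*
 * Each port entry in the topology response encodes the port type in
 * the low bits and the index of the switch behind the port in the high
 * bits; an index of 0xff appears to mean no switch is connected.
 */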
static inline int icm_fr_get_switch_index(u32 port)
{
	int index;

	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
		return 0;

	index = port >> ICM_PORT_INDEX_SHIFT;
	return index != 0xff ? index : 0;
}

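/*
 * Resolve the route string for the switch at the given link and depth
 * by requesting the full topology from the firmware and walking it
 * from the root switch downwards.
 */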
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_fr_pkg_get_topology_response *switches, *sw;
	struct icm_fr_pkg_get_topology request = {
		.hdr = { .code = ICM_GET_TOPOLOGY },
	};
	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
	int ret, index;
	u8 i;

	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
	if (!switches)
		return -ENOMEM;

	ret = icm_request(tb, &request, sizeof(request), switches,
			  sizeof(*switches), npackets, ICM_TIMEOUT);
	if (ret)
		goto err_free;

	sw = &switches[0];
	index = icm_fr_get_switch_index(sw->ports[link]);
	if (!index) {
		ret = -ENODEV;
		goto err_free;
	}

	sw = &switches[index];
	for (i = 1; i < depth; i++) {
		unsigned int j;

		if (!(sw->first_data & ICM_SWITCH_USED)) {
			ret = -ENODEV;
			goto err_free;
		}

		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
			index = icm_fr_get_switch_index(sw->ports[j]);
			if (index > sw->switch_index) {
				sw = &switches[index];
				break;
			}
		}
	}

	*route = get_route(sw->route_hi, sw->route_lo);

err_free:
	kfree(switches);
	return ret;
}

static void icm_fr_save_devices(struct tb *tb)
{
	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}

static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_fr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;

	return 0;
}

static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_approve_device request;
	struct icm_fr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;

	memset(&reply, 0, sizeof(reply));
	/* Use larger timeout as establishing tunnels can take some time */
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_fr_pkg_add_device_key request;
	struct icm_fr_pkg_add_device_key_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_fr_pkg_challenge_device request;
	struct icm_fr_pkg_challenge_device_response reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.connection_id = sw->connection_id;
	request.connection_key = sw->connection_key;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

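/*
 * XDomain paths are torn down through the NHI mailbox. The command is
 * selected by the physical port the link belongs to (PA or PB) and is
 * issued in two phases with a short delay in between.
 */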
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	u8 phy_port;
	u8 cmd;

	phy_port = tb_phy_port_from_link(xd->link);
	if (phy_port == 0)
		cmd = NHI_MAILBOX_DISCONNECT_PA;
	else
		cmd = NHI_MAILBOX_DISCONNECT_PB;

	nhi_mailbox_cmd(tb->nhi, cmd, 1);
	usleep_range(10, 50);
	nhi_mailbox_cmd(tb->nhi, cmd, 2);
	return 0;
}

static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
				      const uuid_t *uuid)
{
	struct tb *tb = parent_sw->tb;
	struct tb_switch *sw;

	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
	if (IS_ERR(sw)) {
		tb_warn(tb, "failed to allocate switch at %llx\n", route);
		return sw;
	}

	sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
	if (!sw->uuid) {
		tb_switch_put(sw);
		return ERR_PTR(-ENOMEM);
	}

	init_completion(&sw->rpm_complete);
	return sw;
}

static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
{
	u64 route = tb_route(sw);
	int ret;

	/* Link the two switches now */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);

	ret = tb_switch_add(sw);
	if (ret)
		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;

	return ret;
}

static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
			  u64 route, u8 connection_id, u8 connection_key,
			  u8 link, u8 depth, bool boot)
{
	/* Disconnect from parent */
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	/* Re-connect via updated port */
	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);

	/* Update with the new addressing information */
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->connection_id = connection_id;
	sw->connection_key = connection_key;
	sw->link = link;
	sw->depth = depth;
	sw->boot = boot;

	/* This switch still exists */
	sw->is_unplugged = false;

	/* Runtime resume is now complete */
	complete(&sw->rpm_complete);
}

static void remove_switch(struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	parent_sw = tb_to_switch(sw->dev.parent);
	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
	tb_switch_remove(sw);
}

static void add_xdomain(struct tb_switch *sw, u64 route,
			const uuid_t *local_uuid, const uuid_t *remote_uuid,
			u8 link, u8 depth)
{
	struct tb_xdomain *xd;

	pm_runtime_get_sync(&sw->dev);

	xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
	if (!xd)
		goto out;

	xd->link = link;
	xd->depth = depth;

	tb_port_at(route, sw)->xdomain = xd;

	tb_xdomain_add(xd);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
}

static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
	xd->link = link;
	xd->route = route;
	xd->is_unplugged = false;
}

static void remove_xdomain(struct tb_xdomain *xd)
{
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	tb_port_at(xd->route, sw)->xdomain = NULL;
	tb_xdomain_remove(xd);
}

static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_connected *pkg =
		(const struct icm_fr_event_device_connected *)hdr;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	bool boot, dual_lane, speed_gen3;
	struct icm *icm = tb_priv(tb);
	bool authorized = false;
	struct tb_xdomain *xd;
	u8 link, depth;
	u64 route;
	int ret;

	icm_postpone_rescan(tb);

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %u.%u was rejected by ICM firmware because the topology limit was exceeded\n",
			link, depth);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		u8 phy_port, sw_phy_port;

		parent_sw = tb_to_switch(sw->dev.parent);
		sw_phy_port = tb_phy_port_from_link(sw->link);
		phy_port = tb_phy_port_from_link(link);

		/*
		 * On resume ICM will send us connected events for the
		 * devices that are still present. However, that
		 * information might have changed, for example a switch
		 * on a dual-link connection might have been enumerated
		 * using the other link now. Make sure our bookkeeping
		 * matches that.
		 */
		if (sw->depth == depth && sw_phy_port == phy_port &&
		    !!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link so update
			 * route string accordingly.
			 */
			if (sw->link != link) {
				ret = icm->get_route(tb, link, depth, &route);
				if (ret) {
					tb_err(tb, "failed to update route string for switch at %u.%u\n",
					       link, depth);
					tb_switch_put(sw);
					return;
				}
			} else {
				route = tb_route(sw);
			}

			update_switch(parent_sw, sw, route, pkg->connection_id,
				      pkg->connection_key, link, depth, boot);
			tb_switch_put(sw);
			return;
		}

		/*
		 * User connected the same switch to another physical
		 * port or to another part of the topology. Remove the
		 * existing switch now before adding the new one.
		 */
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one so remove it first.
	 */
	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
	}
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Remove existing XDomain connection if found */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %u.%u\n",
		       link, depth);
		return;
	}

	ret = icm->get_route(tb, link, depth, &route);
	if (ret) {
		tb_err(tb, "failed to find route string for switch at %u.%u\n",
		       link, depth);
		tb_switch_put(parent_sw);
		return;
	}

	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
		sw->connection_id = pkg->connection_id;
		sw->connection_key = pkg->connection_key;
		sw->link = link;
		sw->depth = depth;
		sw->authorized = authorized;
		sw->security_level = security_level;
		sw->boot = boot;
		sw->link_speed = speed_gen3 ? 20 : 10;
		sw->link_width = dual_lane ? 2 : 1;
		sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));

		if (add_switch(parent_sw, sw))
			tb_switch_put(sw);
	}

	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
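/*
 * Handle the firmware notification that the device at the given link
 * and depth was unplugged. If we know about the corresponding switch
 * it is removed from the topology here.
 */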
static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_device_disconnected *pkg =
		(const struct icm_fr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u8 link, depth;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

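/*
 * Handle the firmware notification that a host-to-host (XDomain)
 * connection was established at the given link and depth. Any stale
 * switch or XDomain occupying the same position is removed before the
 * new XDomain is added.
 */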
static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_connected *pkg =
		(const struct icm_fr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u8 link, depth;
	u64 route;

	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
		ICM_LINK_INFO_DEPTH_SHIFT;

	if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
		return;
	}

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		u8 xd_phy_port, phy_port;

		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
		phy_port = phy_port_from_route(route, depth);

		if (xd->depth == depth && xd_phy_port == phy_port) {
			update_xdomain(xd, route, link);
			tb_xdomain_put(xd);
			return;
		}

		/*
		 * If we find an existing XDomain connection, remove it
		 * now. We need to go through the login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * Check if there already is an XDomain in the same place as the
	 * new one and in that case remove it because it is most likely
	 * another host that got disconnected.
	 */
	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
	if (!xd) {
		u8 dual_link;

		dual_link = dual_link_from_link(link);
		if (dual_link)
			xd = tb_xdomain_find_by_link_depth(tb, dual_link,
							   depth);
	}
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_link_depth(tb, link, depth);
	if (!sw) {
		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
			depth);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
		    depth);
	tb_switch_put(sw);
}

static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_fr_event_xdomain_disconnected *pkg =
		(const struct icm_fr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;

	/*
	 * If the connection goes through one or more devices, the
	 * XDomain device is removed along with them so it is fine if we
	 * cannot find it here.
	 */
	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

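/*
 * Reset the CIO side of the controller by writing through the vendor
 * specific PCIe2CIO registers of the upstream bridge. The offset and
 * bit used here presumably match the Titan Ridge register layout;
 * compare with icm_ar_cio_reset() below which uses different values
 * for Alpine Ridge.
 */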
static int icm_tr_cio_reset(struct tb *tb)
{
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}

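/*
 * Send the driver ready message to the firmware. A longer 20 second
 * timeout is used here (instead of ICM_TIMEOUT) because the firmware
 * may still be starting up. The reply carries the security level, the
 * number of preboot ACL entries and whether RTD3 is supported.
 */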
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 20000);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
	if (nboot_acl)
		*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
			     ICM_TR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);

	return 0;
}

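/*
 * Ask the firmware to authorize the device which also makes it
 * establish the PCIe tunnel. This may take a while, hence the longer
 * ICM_APPROVE_TIMEOUT.
 */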
static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_approve_device request;
	struct icm_tr_pkg_approve_device reply;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_APPROVE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_APPROVE_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "PCIe tunnel creation failed\n");
		return -EIO;
	}

	return 0;
}

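/*
 * Store the user supplied key on the device through the firmware so
 * that future connections of the same device can be authorized with a
 * challenge/response exchange (secure connect).
 */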
static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct icm_tr_pkg_add_device_key_response reply;
	struct icm_tr_pkg_add_device_key request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_ADD_DEVICE_KEY;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
		tb_warn(tb, "Adding key to switch failed\n");
		return -EIO;
	}

	return 0;
}

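/*
 * Send a challenge to the device and read back its response. Used at
 * the "secure" security level to verify that the device still knows
 * the key stored earlier with icm_tr_add_switch_key().
 */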
static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
				       const u8 *challenge, u8 *response)
{
	struct icm_tr_pkg_challenge_device_response reply;
	struct icm_tr_pkg_challenge_device request;
	int ret;

	memset(&request, 0, sizeof(request));
	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
	request.hdr.code = ICM_CHALLENGE_DEVICE;
	request.route_lo = sw->config.route_lo;
	request.route_hi = sw->config.route_hi;
	request.connection_id = sw->connection_id;
	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EKEYREJECTED;
	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
		return -ENOKEY;

	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);

	return 0;
}

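/*
 * Ask the firmware to set up the DMA paths for a host-to-host
 * connection using the rings and path HopIDs the XDomain protocol
 * negotiated.
 */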
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
				    int stage)
{
	struct icm_tr_pkg_disconnect_xdomain_response reply;
	struct icm_tr_pkg_disconnect_xdomain request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.hdr.code = ICM_DISCONNECT_XDOMAIN;
	request.stage = stage;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

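/*
 * The firmware tears an XDomain connection down in two stages, so send
 * the disconnect message twice with a short delay in between.
 */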
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	int ret;

	ret = icm_tr_xdomain_tear_down(tb, xd, 1);
	if (ret)
		return ret;

	usleep_range(10, 50);
	return icm_tr_xdomain_tear_down(tb, xd, 2);
}

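/*
 * Common device connected handler for Titan Ridge and Ice Lake.
 * @force_rtd3 is set by the Ice Lake variant because that controller
 * always supports RTD3 regardless of the device VSS entries.
 */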
static void
__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
			  bool force_rtd3)
{
	const struct icm_tr_event_device_connected *pkg =
		(const struct icm_tr_event_device_connected *)hdr;
	bool authorized, boot, dual_lane, speed_gen3;
	enum tb_security_level security_level;
	struct tb_switch *sw, *parent_sw;
	struct tb_xdomain *xd;
	u64 route;

	icm_postpone_rescan(tb);

	/*
	 * Currently we don't use the QoS information coming with the
	 * device connected message so simply ignore that extra packet
	 * for now.
	 */
	if (pkg->hdr.packet_id)
		return;

	route = get_route(pkg->route_hi, pkg->route_lo);
	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
	security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
			 ICM_FLAGS_SLEVEL_SHIFT;
	boot = pkg->link_info & ICM_LINK_INFO_BOOT;
	dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
	speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;

	if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
		tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
			route);
		return;
	}

	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
	if (sw) {
		/* Update the switch if it is still in the same place */
		if (tb_route(sw) == route && !!sw->authorized == authorized) {
			parent_sw = tb_to_switch(sw->dev.parent);
			update_switch(parent_sw, sw, route, pkg->connection_id,
				      0, 0, 0, boot);
			tb_switch_put(sw);
			return;
		}

		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* Another switch with the same address */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	/* XDomain connection with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!parent_sw) {
		tb_err(tb, "failed to find parent switch for %llx\n", route);
		return;
	}

	pm_runtime_get_sync(&parent_sw->dev);

	sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
	if (!IS_ERR(sw)) {
		sw->connection_id = pkg->connection_id;
		sw->authorized = authorized;
		sw->security_level = security_level;
		sw->boot = boot;
		sw->link_speed = speed_gen3 ? 20 : 10;
		sw->link_width = dual_lane ? 2 : 1;
		sw->rpm = force_rtd3;
		if (!sw->rpm)
			sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
						    sizeof(pkg->ep_name));

		if (add_switch(parent_sw, sw))
			tb_switch_put(sw);
	}

	pm_runtime_mark_last_busy(&parent_sw->dev);
	pm_runtime_put_autosuspend(&parent_sw->dev);

	tb_switch_put(parent_sw);
}

static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	__icm_tr_device_connected(tb, hdr, false);
}

static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_device_disconnected *pkg =
		(const struct icm_tr_event_device_disconnected *)hdr;
	struct tb_switch *sw;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	sw = tb_switch_find_by_route(tb, route);
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	remove_switch(sw);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_connected *pkg =
		(const struct icm_tr_event_xdomain_connected *)hdr;
	struct tb_xdomain *xd;
	struct tb_switch *sw;
	u64 route;

	if (!tb->root_switch)
		return;

	route = get_route(pkg->local_route_hi, pkg->local_route_lo);

	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
	if (xd) {
		if (xd->route == route) {
			update_xdomain(xd, route, 0);
			tb_xdomain_put(xd);
			return;
		}

		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/* An existing xdomain with the same address */
	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}

	/*
	 * If the user disconnected a switch during suspend and
	 * connected another host to the same port, remove the switch
	 * first.
	 */
	sw = tb_switch_find_by_route(tb, route);
	if (sw) {
		remove_switch(sw);
		tb_switch_put(sw);
	}

	sw = tb_switch_find_by_route(tb, get_parent_route(route));
	if (!sw) {
		tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
		return;
	}

	add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
	tb_switch_put(sw);
}

static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_tr_event_xdomain_disconnected *pkg =
		(const struct icm_tr_event_xdomain_disconnected *)hdr;
	struct tb_xdomain *xd;
	u64 route;

	route = get_route(pkg->route_hi, pkg->route_lo);

	xd = tb_xdomain_find_by_route(tb, route);
	if (xd) {
		remove_xdomain(xd);
		tb_xdomain_put(xd);
	}
}

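/*
 * Walk up the PCI hierarchy from the NHI and return the first PCIe
 * upstream port if it is one of the known Alpine Ridge or Titan Ridge
 * bridges. This is the port whose vendor specific registers are used
 * when the firmware needs to be reset and restarted.
 */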
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	parent = pci_upstream_bridge(pdev);
	while (parent) {
		if (!pci_is_pcie(parent))
			return NULL;
		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
			break;
		parent = pci_upstream_bridge(parent);
	}

	if (!parent)
		return NULL;

	switch (parent->device) {
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
		return parent;
	}

	return NULL;
}

static bool icm_ar_is_supported(struct tb *tb)
{
	struct pci_dev *upstream_port;
	struct icm *icm = tb_priv(tb);

	/*
	 * Starting from Alpine Ridge we can use ICM on Apple machines
	 * as well. We just need to reset and re-enable it first.
	 * However, only start it if explicitly asked by the user.
	 */
	if (icm_firmware_running(tb->nhi))
		return true;
	if (!start_icm)
		return false;

	/*
	 * Find the upstream PCIe port in case we need to reset the
	 * controller through its vendor specific registers.
	 */
	upstream_port = get_upstream_port(tb->nhi->pdev);
	if (upstream_port) {
		int cap;

		cap = pci_find_ext_capability(upstream_port,
					      PCI_EXT_CAP_ID_VNDR);
		if (cap > 0) {
			icm->upstream_port = upstream_port;
			icm->vnd_cap = cap;

			return true;
		}
	}

	return false;
}

static int icm_ar_cio_reset(struct tb *tb)
{
	return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}

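/*
 * Wait up to ~3 seconds (60 x 50 ms) for the firmware to finish NVM
 * authentication and then read the current firmware mode from the NHI
 * mailbox.
 */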
static int icm_ar_get_mode(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	int retries = 60;
	u32 val;

	do {
		val = ioread32(nhi->iobase + REG_FW_STS);
		if (val & REG_FW_STS_NVM_AUTH_DONE)
			break;
		msleep(50);
	} while (--retries);

	if (!retries) {
		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
		return -ENODEV;
	}

	return nhi_mailbox_mode(nhi);
}

static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		    size_t *nboot_acl, bool *rpm)
{
	struct icm_ar_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (security_level)
		*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
	if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
		*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
			     ICM_AR_INFO_BOOT_ACL_SHIFT;
	if (rpm)
		*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);

	return 0;
}

static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
	struct icm_ar_pkg_get_route_response reply;
	struct icm_ar_pkg_get_route request = {
		.hdr = { .code = ICM_GET_ROUTE },
		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	*route = get_route(reply.route_hi, reply.route_lo);
	return 0;
}

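/*
 * Read the preboot ACL from the firmware. Each entry carries only the
 * low 64 bits of the UUID: an entry of all ones marks an unused slot
 * and is mapped to the null UUID, while for used slots the upper two
 * DWs are filled in with all ones to reconstruct the full UUID.
 */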
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = { .code = ICM_PREBOOT_ACL },
	};
	int ret, i;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	for (i = 0; i < nuuids; i++) {
		u32 *uuid = (u32 *)&uuids[i];

		uuid[0] = reply.acl[i].uuid_lo;
		uuid[1] = reply.acl[i].uuid_hi;

		if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
			/* Map empty entries to null UUID */
			uuid[0] = 0;
			uuid[1] = 0;
		} else if (uuid[0] != 0 || uuid[1] != 0) {
			/* Upper two DWs are always all ones */
			uuid[2] = 0xffffffff;
			uuid[3] = 0xffffffff;
		}
	}

	return ret;
}

static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
			       size_t nuuids)
{
	struct icm_ar_pkg_preboot_acl_response reply;
	struct icm_ar_pkg_preboot_acl request = {
		.hdr = {
			.code = ICM_PREBOOT_ACL,
			.flags = ICM_FLAGS_WRITE,
		},
	};
	int ret, i;

	for (i = 0; i < nuuids; i++) {
		const u32 *uuid = (const u32 *)&uuids[i];

		if (uuid_is_null(&uuids[i])) {
			/*
			 * Map null UUID to the empty (all ones) entries
			 * for ICM.
			 */
			request.acl[i].uuid_lo = 0xffffffff;
			request.acl[i].uuid_hi = 0xffffffff;
		} else {
			/* Two high DWs need to be set to all ones */
			if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
				return -EINVAL;

			request.acl[i].uuid_lo = uuid[0];
			request.acl[i].uuid_hi = uuid[1];
		}
	}

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, ICM_TIMEOUT);
	if (ret)
		return ret;

	if (reply.hdr.flags & ICM_FLAGS_ERROR)
		return -EIO;

	return 0;
}

static int
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
		     size_t *nboot_acl, bool *rpm)
{
	struct icm_tr_pkg_driver_ready_response reply;
	struct icm_pkg_driver_ready request = {
		.hdr.code = ICM_DRIVER_READY,
	};
	int ret;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
			  1, 20000);
	if (ret)
		return ret;

	/* Ice Lake always supports RTD3 */
	if (rpm)
		*rpm = true;

	return 0;
}

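/*
 * On Ice Lake the root switch UUID is not otherwise available, so
 * build it from the two vendor specific config space dwords
 * (VS_CAP_10 and VS_CAP_11) with the upper half set to all ones.
 */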
static void icm_icl_set_uuid(struct tb *tb)
{
	struct tb_nhi *nhi = tb->nhi;
	u32 uuid[4];

	pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
	pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
	uuid[2] = 0xffffffff;
	uuid[3] = 0xffffffff;

	tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}

static void
icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
	__icm_tr_device_connected(tb, hdr, true);
}

static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
{
	const struct icm_icl_event_rtd3_veto *pkg =
		(const struct icm_icl_event_rtd3_veto *)hdr;

	tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);

	if (pkg->veto_reason)
		icm_veto_begin(tb);
	else
		icm_veto_end(tb);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static bool icm_tgl_is_supported(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /*
	 * If the firmware is not running, use the software CM. This
	 * platform should fully support both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) val = ioread32(tb->nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) return !!(val & REG_FW_STS_NVM_AUTH_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
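/*
 * Deferred work for a single firmware notification. Dispatches the
 * event to the generation specific handler with the domain lock held.
 */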
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) static void icm_handle_notification(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) struct icm_notification *n = container_of(work, typeof(*n), work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct tb *tb = n->tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * When the domain is stopped we flush its workqueue but before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * that the root switch is removed. In that case we should treat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * the queued events as being canceled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (tb->root_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) switch (n->pkg->code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) case ICM_EVENT_DEVICE_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) icm->device_connected(tb, n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) case ICM_EVENT_DEVICE_DISCONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) icm->device_disconnected(tb, n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) case ICM_EVENT_XDOMAIN_CONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) icm->xdomain_connected(tb, n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) case ICM_EVENT_XDOMAIN_DISCONNECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) icm->xdomain_disconnected(tb, n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) case ICM_EVENT_RTD3_VETO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) icm->rtd3_veto(tb, n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) kfree(n->pkg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) kfree(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
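/*
 * Called for each incoming firmware event. The packet is duplicated
 * and queued to the domain workqueue so that the events are handled in
 * order and under tb->lock.
 */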
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) const void *buf, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) struct icm_notification *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) n = kmalloc(sizeof(*n), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) INIT_WORK(&n->work, icm_handle_notification);
	n->pkg = kmemdup(buf, size, GFP_KERNEL);
	if (!n->pkg) {
		kfree(n);
		return;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) n->tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) queue_work(tb->wq, &n->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
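/*
 * Send the driver ready command and then poll (up to 50 times, 50 ms
 * apart, so roughly 2.5 seconds) until the root switch config space
 * responds to reads. The firmware may need a moment after driver ready
 * before the config space becomes accessible.
 */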
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) size_t *nboot_acl, bool *rpm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) unsigned int retries = 50;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) tb_err(tb, "failed to send driver ready to ICM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) /*
	 * Wait here until the switch config space becomes accessible so
	 * that we can read the root switch config successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct tb_cfg_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 0, 1, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (!res.err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) } while (--retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) tb_err(tb, "failed to read root switch config space, giving up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
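/*
 * Reset sequence: park the ARC to wait for a CIO reset, re-enable the
 * firmware CPU, and finally trigger the CIO reset itself through the
 * generation specific hook.
 */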
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) if (!icm->upstream_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
	/* Park the ARC to wait for the CIO reset event to happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) val = ioread32(nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) val |= REG_FW_STS_CIO_RESET_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) iowrite32(val, nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* Re-start ARC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) val = ioread32(nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) val |= REG_FW_STS_ICM_EN_INVERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) val |= REG_FW_STS_ICM_EN_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) iowrite32(val, nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* Trigger CIO reset now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) return icm->cio_reset(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
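/*
 * Start the ICM firmware if it is not already running, then poll up to
 * ~3 seconds (10 x 300 ms) for the NVM authentication done bit which
 * indicates the firmware is up.
 */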
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) unsigned int retries = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* Check if the ICM firmware is already running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (icm_firmware_running(nhi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) ret = icm_firmware_reset(tb, nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) /* Wait until the ICM firmware tells us it is up and running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) /* Check that the ICM firmware is running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) val = ioread32(nhi->iobase + REG_FW_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (val & REG_FW_STS_NVM_AUTH_DONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) msleep(300);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) } while (--retries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
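/*
 * Each physical port consists of two lanes (null ports): physical port
 * 0 maps to ports 1 and 2, physical port 1 to ports 3 and 4. If both
 * lanes report link up, briefly disable and then re-enable both links.
 */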
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static int icm_reset_phy_port(struct tb *tb, int phy_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) u32 state0, state1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int port0, port1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) u32 val0, val1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) if (!icm->upstream_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (phy_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) port0 = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) port1 = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) port0 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) port1 = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * Read link status of both null ports belonging to a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) * physical port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
	/* If they are both up, we need to reset them now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) val0 |= PHY_PORT_CS1_LINK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) val1 |= PHY_PORT_CS1_LINK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) /* Wait a bit and then re-enable both ports */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) usleep_range(10, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
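/*
 * Bring up the firmware: start the ICM if needed, check which mode it
 * is running in, and reset any already connected physical ports.
 */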
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) static int icm_firmware_init(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct tb_nhi *nhi = tb->nhi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) ret = icm_firmware_start(tb, nhi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (icm->get_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) ret = icm->get_mode(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) switch (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) case NHI_FW_SAFE_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) icm->safe_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) case NHI_FW_CM_MODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /* Ask ICM to accept all Thunderbolt devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * Reset both physical ports if there is anything connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) * them already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) ret = icm_reset_phy_port(tb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) ret = icm_reset_phy_port(tb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static int icm_driver_ready(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) ret = icm_firmware_init(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (icm->safe_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) &icm->rpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) /*
	 * Make sure the number of supported preboot ACL entries matches
	 * what we expect, or disable the whole feature.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (tb->nboot_acl > icm->max_boot_acl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) tb->nboot_acl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
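/*
 * On system suspend ask the firmware to save the connected devices (on
 * generations that support it) and tell it the driver is unloading.
 */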
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) static int icm_suspend(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) if (icm->save_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) icm->save_devices(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /*
 * Mark all switches (except the root switch) below this one unplugged.
 * The ICM firmware will send us an updated list of switches after we
 * have sent it the driver ready command. If a switch is not in that
 * list it will be removed when we perform the rescan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static void icm_unplug_children(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) sw->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (port->xdomain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) port->xdomain->is_unplugged = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) else if (tb_port_has_remote(port))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) icm_unplug_children(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) static int complete_rpm(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct tb_switch *sw = tb_to_switch(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) complete(&sw->rpm_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) static void remove_unplugged_switch(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) struct device *parent = get_device(sw->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) pm_runtime_get_sync(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) /*
	 * Signal rpm_complete for this switch and the switches below it
	 * because tb_switch_remove() calls pm_runtime_get_sync() which
	 * then waits for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) complete_rpm(&sw->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) tb_switch_remove(sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) pm_runtime_mark_last_busy(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) pm_runtime_put_autosuspend(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) put_device(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
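/*
 * Walk the topology and remove any switch or XDomain that is still
 * marked unplugged, i.e. was not re-announced by the firmware after
 * resume. Called from the rescan work with tb->lock held.
 */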
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) static void icm_free_unplugged_children(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) struct tb_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) tb_switch_for_each_port(sw, port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (port->xdomain && port->xdomain->is_unplugged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) tb_xdomain_remove(port->xdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) port->xdomain = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) } else if (tb_port_has_remote(port)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if (port->remote->sw->is_unplugged) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) remove_unplugged_switch(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) port->remote = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) icm_free_unplugged_children(port->remote->sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) static void icm_rescan_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct icm *icm = container_of(work, struct icm, rescan_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct tb *tb = icm_to_tb(icm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) mutex_lock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (tb->root_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) icm_free_unplugged_children(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) mutex_unlock(&tb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) static void icm_complete(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) if (tb->nhi->going_away)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /*
	 * If RTD3 was vetoed before we entered system suspend, allow it
	 * again now before the driver ready command is sent. The firmware
	 * sends a new RTD3 veto if the condition still holds after it has
	 * received the driver ready command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) icm_veto_end(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) icm_unplug_children(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /*
	 * Now that all existing children should be resumed, start
	 * events from the ICM to get updated status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) __icm_driver_ready(tb, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) /*
	 * We do not get notifications for devices that were unplugged
	 * during suspend, so schedule a rescan to clean them up if
	 * there are any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static int icm_runtime_suspend(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) static int icm_runtime_suspend_switch(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) if (tb_route(sw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) reinit_completion(&sw->rpm_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
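/*
 * Wait up to 500 ms for the firmware to re-announce the device after
 * runtime resume; rpm_complete is signaled when that happens. Timing
 * out here is not fatal, hence only a debug message.
 */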
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) static int icm_runtime_resume_switch(struct tb_switch *sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) if (tb_route(sw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (!wait_for_completion_timeout(&sw->rpm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) msecs_to_jiffies(500))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) dev_dbg(&sw->dev, "runtime resuming timed out\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) static int icm_runtime_resume(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) /*
	 * We can reuse the same resume functionality as with system
	 * suspend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) icm_complete(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
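/*
 * Allocate and register the root switch. In safe mode only a bare
 * switch is created so that the host controller NVM can be upgraded.
 */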
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) static int icm_start(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (icm->safe_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (IS_ERR(tb->root_switch))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) return PTR_ERR(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) tb->root_switch->rpm = icm->rpm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (icm->set_uuid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) icm->set_uuid(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) ret = tb_switch_add(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) tb_switch_put(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) tb->root_switch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) static void icm_stop(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) struct icm *icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) cancel_delayed_work(&icm->rescan_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) tb_switch_remove(tb->root_switch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) tb->root_switch = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) static int icm_disconnect_pcie_paths(struct tb *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) /* Falcon Ridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) static const struct tb_cm_ops icm_fr_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) .driver_ready = icm_driver_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) .start = icm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) .stop = icm_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) .suspend = icm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) .complete = icm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) .handle_event = icm_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) .approve_switch = icm_fr_approve_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) .add_switch_key = icm_fr_add_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) .challenge_switch_key = icm_fr_challenge_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) .disconnect_pcie_paths = icm_disconnect_pcie_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /* Alpine Ridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static const struct tb_cm_ops icm_ar_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) .driver_ready = icm_driver_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) .start = icm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) .stop = icm_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) .suspend = icm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) .complete = icm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) .runtime_suspend = icm_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) .runtime_resume = icm_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) .runtime_suspend_switch = icm_runtime_suspend_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) .runtime_resume_switch = icm_runtime_resume_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) .handle_event = icm_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) .get_boot_acl = icm_ar_get_boot_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) .set_boot_acl = icm_ar_set_boot_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) .approve_switch = icm_fr_approve_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) .add_switch_key = icm_fr_add_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) .challenge_switch_key = icm_fr_challenge_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) .disconnect_pcie_paths = icm_disconnect_pcie_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Titan Ridge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) static const struct tb_cm_ops icm_tr_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) .driver_ready = icm_driver_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) .start = icm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) .stop = icm_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) .suspend = icm_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) .complete = icm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) .runtime_suspend = icm_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) .runtime_resume = icm_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) .runtime_suspend_switch = icm_runtime_suspend_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) .runtime_resume_switch = icm_runtime_resume_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) .handle_event = icm_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) .get_boot_acl = icm_ar_get_boot_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) .set_boot_acl = icm_ar_set_boot_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) .approve_switch = icm_tr_approve_switch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) .add_switch_key = icm_tr_add_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) .challenge_switch_key = icm_tr_challenge_switch_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) .disconnect_pcie_paths = icm_disconnect_pcie_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) /* Ice Lake */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static const struct tb_cm_ops icm_icl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) .driver_ready = icm_driver_ready,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) .start = icm_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) .stop = icm_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) .complete = icm_complete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) .runtime_suspend = icm_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) .runtime_resume = icm_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) .handle_event = icm_handle_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
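/**
 * icm_probe() - Check if the NHI is ICM capable
 * @nhi: The NHI to check
 *
 * Allocates a Thunderbolt domain and hooks up the generation specific
 * callbacks when the internal connection manager is supported on this
 * controller. Returns %NULL if it is not.
 */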
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) struct tb *icm_probe(struct tb_nhi *nhi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct icm *icm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct tb *tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) tb = tb_domain_alloc(nhi, sizeof(struct icm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) if (!tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) icm = tb_priv(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) mutex_init(&icm->request_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) switch (nhi->pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) icm->can_upgrade_nvm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) icm->is_supported = icm_fr_is_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) icm->get_route = icm_fr_get_route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) icm->save_devices = icm_fr_save_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) icm->driver_ready = icm_fr_driver_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) icm->device_connected = icm_fr_device_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) icm->device_disconnected = icm_fr_device_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) icm->xdomain_connected = icm_fr_xdomain_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) tb->cm_ops = &icm_fr_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * NVM upgrade has not been tested on Apple systems and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * they don't provide images publicly either. To be on
		 * the safe side, prevent root switch NVM upgrade on Macs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) icm->can_upgrade_nvm = !x86_apple_machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) icm->is_supported = icm_ar_is_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) icm->cio_reset = icm_ar_cio_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) icm->get_mode = icm_ar_get_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) icm->get_route = icm_ar_get_route;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) icm->save_devices = icm_fr_save_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) icm->driver_ready = icm_ar_driver_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) icm->device_connected = icm_fr_device_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) icm->device_disconnected = icm_fr_device_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) icm->xdomain_connected = icm_fr_xdomain_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) tb->cm_ops = &icm_ar_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) icm->can_upgrade_nvm = !x86_apple_machine;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) icm->is_supported = icm_ar_is_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) icm->cio_reset = icm_tr_cio_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) icm->get_mode = icm_ar_get_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) icm->driver_ready = icm_tr_driver_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) icm->device_connected = icm_tr_device_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) icm->device_disconnected = icm_tr_device_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) icm->xdomain_connected = icm_tr_xdomain_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) tb->cm_ops = &icm_tr_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) case PCI_DEVICE_ID_INTEL_ICL_NHI0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) case PCI_DEVICE_ID_INTEL_ICL_NHI1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) icm->is_supported = icm_fr_is_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) icm->driver_ready = icm_icl_driver_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) icm->set_uuid = icm_icl_set_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) icm->device_connected = icm_icl_device_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) icm->device_disconnected = icm_tr_device_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) icm->xdomain_connected = icm_tr_xdomain_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) icm->rtd3_veto = icm_icl_rtd3_veto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) tb->cm_ops = &icm_icl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) case PCI_DEVICE_ID_INTEL_TGL_NHI0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) case PCI_DEVICE_ID_INTEL_TGL_NHI1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) icm->is_supported = icm_tgl_is_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) icm->driver_ready = icm_icl_driver_ready;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) icm->set_uuid = icm_icl_set_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) icm->device_connected = icm_icl_device_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) icm->device_disconnected = icm_tr_device_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) icm->xdomain_connected = icm_tr_xdomain_connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) icm->rtd3_veto = icm_icl_rtd3_veto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) tb->cm_ops = &icm_icl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (!icm->is_supported || !icm->is_supported(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) tb_domain_put(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) return tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }