// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dmar.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
			     const struct tb_service *svc)
{
	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
		if (strcmp(id->protocol_key, svc->key))
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
		if (id->protocol_id != svc->prtcid)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
		if (id->protocol_version != svc->prtcvers)
			return false;
	}

	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
		if (id->protocol_revision != svc->prtcrevs)
			return false;
	}

	return true;
}

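/*
 * For illustration only: a hypothetical service driver binding to
 * protocol key "network", protocol ID 1, regardless of version or
 * revision, could use an ID table like this (the TB_SERVICE() helper
 * sets both the protocol key and protocol ID match flags):
 *
 *	static const struct tb_service_id ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *
 * The terminating entry has match_flags == 0, which is how
 * __tb_service_match() below detects the end of the table.
 */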
static const struct tb_service_id *__tb_service_match(struct device *dev,
						      struct device_driver *drv)
{
	struct tb_service_driver *driver;
	const struct tb_service_id *ids;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc)
		return NULL;

	driver = container_of(drv, struct tb_service_driver, driver);
	if (!driver->id_table)
		return NULL;

	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
		if (match_service_id(ids, svc))
			return ids;
	}

	return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
	return !!__tb_service_match(dev, drv);
}

static int tb_service_probe(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;
	const struct tb_service_id *id;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	id = __tb_service_match(dev, &driver->driver);

	return driver->probe(svc, id);
}

static int tb_service_remove(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tb_service_driver *driver;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->remove)
		driver->remove(svc);

	return 0;
}

static void tb_service_shutdown(struct device *dev)
{
	struct tb_service_driver *driver;
	struct tb_service *svc;

	svc = tb_to_service(dev);
	if (!svc || !dev->driver)
		return;

	driver = container_of(dev->driver, struct tb_service_driver, driver);
	if (driver->shutdown)
		driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
	[TB_SECURITY_NONE] = "none",
	[TB_SECURITY_USER] = "user",
	[TB_SECURITY_SECURE] = "secure",
	[TB_SECURITY_DPONLY] = "dponly",
	[TB_SECURITY_USBONLY] = "usbonly",
};

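/*
 * The boot ACL is exposed to user space as a single line of exactly
 * tb->nboot_acl comma-separated slots, each slot either a UUID string
 * or empty when unused. Writes use the same format; for example
 * (illustrative values, assuming a two-slot ACL), writing
 * "deadbeef-0000-0000-0000-000000000000," fills the first slot and
 * clears the second.
 */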
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	uuid_t *uuids;
	ssize_t ret;
	int i;

	uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!uuids)
		return -ENOMEM;

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}
	ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
	if (ret) {
		mutex_unlock(&tb->lock);
		goto out;
	}
	mutex_unlock(&tb->lock);

	for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
		if (!uuid_is_null(&uuids[i]))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
					 &uuids[i]);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
				 i < tb->nboot_acl - 1 ? "," : "\n");
	}

out:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
	kfree(uuids);

	return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	char *str, *s, *uuid_str;
	ssize_t ret = 0;
	uuid_t *acl;
	int i = 0;

	/*
	 * Make sure the value is not bigger than tb->nboot_acl * UUID
	 * length + commas and optional "\n". Also the smallest allowable
	 * string is tb->nboot_acl - 1 commas (all entries empty).
	 */
	if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
		return -EINVAL;
	if (count < tb->nboot_acl - 1)
		return -EINVAL;

	str = kstrdup(buf, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
	if (!acl) {
		ret = -ENOMEM;
		goto err_free_str;
	}

	uuid_str = strim(str);
	while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
		size_t len = strlen(s);

		if (len) {
			if (len != UUID_STRING_LEN) {
				ret = -EINVAL;
				goto err_free_acl;
			}
			ret = uuid_parse(s, &acl[i]);
			if (ret)
				goto err_free_acl;
		}

		i++;
	}

	if (s || i < tb->nboot_acl) {
		ret = -EINVAL;
		goto err_free_acl;
	}

	pm_runtime_get_sync(&tb->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto err_rpm_put;
	}
	ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
	if (!ret) {
		/* Notify userspace about the change */
		kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
	}
	mutex_unlock(&tb->lock);

err_rpm_put:
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
	kfree(acl);
err_free_str:
	kfree(str);

	return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);

static ssize_t iommu_dma_protection_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/*
	 * Kernel DMA protection is a feature where Thunderbolt security
	 * is handled natively using the IOMMU. It is enabled when the
	 * IOMMU is enabled and the ACPI DMAR table has
	 * DMAR_PLATFORM_OPT_IN set.
	 */
	return sprintf(buf, "%d\n",
		       iommu_present(&pci_bus_type) && dmar_platform_optin());
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct tb *tb = container_of(dev, struct tb, dev);
	const char *name = "unknown";

	if (tb->security_level < ARRAY_SIZE(tb_security_names))
		name = tb_security_names[tb->security_level];

	return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
	&dev_attr_boot_acl.attr,
	&dev_attr_iommu_dma_protection.attr,
	&dev_attr_security.attr,
	NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb *tb = container_of(dev, struct tb, dev);

	if (attr == &dev_attr_boot_acl.attr) {
		if (tb->nboot_acl &&
		    tb->cm_ops->get_boot_acl &&
		    tb->cm_ops->set_boot_acl)
			return attr->mode;
		return 0;
	}

	return attr->mode;
}

static struct attribute_group domain_attr_group = {
	.is_visible = domain_attr_is_visible,
	.attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
	&domain_attr_group,
	NULL,
};

struct bus_type tb_bus_type = {
	.name = "thunderbolt",
	.match = tb_service_match,
	.probe = tb_service_probe,
	.remove = tb_service_remove,
	.shutdown = tb_service_shutdown,
};
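/*
 * Service drivers bind to services on this bus. As a sketch, a
 * hypothetical driver could be registered like this:
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver.name = "my-service",
 *		.probe = my_probe,
 *		.id_table = ids,
 *	};
 *
 *	tb_register_service_driver(&my_driver);
 *
 * where tb_register_service_driver() points driver.bus at tb_bus_type
 * before calling driver_register(). The names above are illustrative
 * only.
 */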

static void tb_domain_release(struct device *dev)
{
	struct tb *tb = container_of(dev, struct tb, dev);

	tb_ctl_free(tb->ctl);
	destroy_workqueue(tb->wq);
	ida_simple_remove(&tb_domain_ida, tb->index);
	mutex_destroy(&tb->lock);
	kfree(tb);
}

struct device_type tb_domain_type = {
	.name = "thunderbolt_domain",
	.release = tb_domain_release,
};

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware expects
	 * because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}

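/*
 * A rough sketch of how a connection manager (see icm.c and tb.c for
 * the real implementations) brings a domain up:
 *
 *	struct tb *tb = tb_domain_alloc(nhi, sizeof(struct my_cm));
 *	if (!tb)
 *		return NULL;
 *	tb->cm_ops = &my_cm_ops;
 *	...
 *	if (tb_domain_add(tb))
 *		tb_domain_put(tb);
 *
 * "struct my_cm" and "my_cm_ops" are illustrative names only.
 */
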
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
			       const void *buf, size_t size)
{
	struct tb *tb = data;

	if (!tb->cm_ops->handle_event) {
		tb_warn(tb, "domain does not have event handler\n");
		return true;
	}

	switch (type) {
	case TB_CFG_PKG_XDOMAIN_REQ:
	case TB_CFG_PKG_XDOMAIN_RESP:
		return tb_xdomain_handle_request(tb, type, buf, size);

	default:
		tb->cm_ops->handle_event(tb, type, buf, size);
	}

	return true;
}

/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices will
 * work after this function has returned successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
	int ret;

	if (WARN_ON(!tb->cm_ops))
		return -EINVAL;

	mutex_lock(&tb->lock);

	tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
	if (!tb->ctl) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	/*
	 * tb_schedule_hotplug_handler may be called as soon as the config
	 * channel is started. That's why we have to hold the lock here.
	 */
	tb_ctl_start(tb->ctl);

	if (tb->cm_ops->driver_ready) {
		ret = tb->cm_ops->driver_ready(tb);
		if (ret)
			goto err_ctl_stop;
	}

	ret = device_add(&tb->dev);
	if (ret)
		goto err_ctl_stop;

	/* Start the domain */
	if (tb->cm_ops->start) {
		ret = tb->cm_ops->start(tb);
		if (ret)
			goto err_domain_del;
	}

	/* This starts event processing */
	mutex_unlock(&tb->lock);

	device_init_wakeup(&tb->dev, true);

	pm_runtime_no_callbacks(&tb->dev);
	pm_runtime_set_active(&tb->dev);
	pm_runtime_enable(&tb->dev);
	pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&tb->dev);
	pm_runtime_use_autosuspend(&tb->dev);

	return 0;

err_domain_del:
	device_del(&tb->dev);
err_ctl_stop:
	tb_ctl_stop(tb->ctl);
err_unlock:
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
	mutex_lock(&tb->lock);
	if (tb->cm_ops->stop)
		tb->cm_ops->stop(tb);
	/* Stop the domain control traffic */
	tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	flush_workqueue(tb->wq);
	device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
	int ret = 0;

	/*
	 * The control channel interrupt is left enabled during suspend
	 * and taking the lock here prevents any events happening before
	 * we actually have stopped the domain and the control channel.
	 */
	mutex_lock(&tb->lock);
	if (tb->cm_ops->suspend_noirq)
		ret = tb->cm_ops->suspend_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->resume_noirq)
		ret = tb->cm_ops->resume_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_suspend(struct tb *tb)
{
	return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	if (tb->cm_ops->freeze_noirq)
		ret = tb->cm_ops->freeze_noirq(tb);
	if (!ret)
		tb_ctl_stop(tb->ctl);
	mutex_unlock(&tb->lock);

	return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
	int ret = 0;

	mutex_lock(&tb->lock);
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->thaw_noirq)
		ret = tb->cm_ops->thaw_noirq(tb);
	mutex_unlock(&tb->lock);

	return ret;
}

void tb_domain_complete(struct tb *tb)
{
	if (tb->cm_ops->complete)
		tb->cm_ops->complete(tb);
}

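/*
 * The runtime PM helpers below are driven by the NHI driver (see
 * nhi.c); the domain device itself has no runtime PM callbacks of its
 * own (pm_runtime_no_callbacks() in tb_domain_add()).
 */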
int tb_domain_runtime_suspend(struct tb *tb)
{
	if (tb->cm_ops->runtime_suspend) {
		int ret = tb->cm_ops->runtime_suspend(tb);

		if (ret)
			return ret;
	}
	tb_ctl_stop(tb->ctl);
	return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
	tb_ctl_start(tb->ctl);
	if (tb->cm_ops->runtime_resume) {
		int ret = tb->cm_ops->runtime_resume(tb);

		if (ret)
			return ret;
	}
	return 0;
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This will approve the switch by connection manager specific means.
 * In case of success the connection manager will create tunnels for
 * all supported protocols.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;

	if (!tb->cm_ops->approve_switch)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and
 * connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
	struct tb_switch *parent_sw;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	ret = tb->cm_ops->add_switch_key(tb, sw);
	if (ret)
		return ret;

	return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds,
 * and if the response matches the HMAC we compute over the same
 * challenge with the switch key, the switch is approved and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
	u8 challenge[TB_SWITCH_KEY_SIZE];
	u8 response[TB_SWITCH_KEY_SIZE];
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct tb_switch *parent_sw;
	struct crypto_shash *tfm;
	struct shash_desc *shash;
	int ret;

	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
		return -EPERM;

	/* The parent switch must be authorized before this one */
	parent_sw = tb_to_switch(sw->dev.parent);
	if (!parent_sw || !parent_sw->authorized)
		return -EINVAL;

	get_random_bytes(challenge, sizeof(challenge));
	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
	if (ret)
		return ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
	if (ret)
		goto err_free_tfm;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto err_free_tfm;
	}

	shash->tfm = tfm;

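	/*
	 * The expected response is HMAC-SHA256(key, challenge). The
	 * SHA-256 digest length equals TB_SWITCH_KEY_SIZE (32 bytes),
	 * which is why sizeof(hmac) doubles as the challenge length in
	 * the digest call below.
	 */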
	memset(hmac, 0, sizeof(hmac));
	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
	if (ret)
		goto err_free_shash;

	/* The returned HMAC must match the one we calculated */
	if (memcmp(response, hmac, sizeof(hmac))) {
		ret = -EKEYREJECTED;
		goto err_free_shash;
	}

	crypto_free_shash(tfm);
	kfree(shash);

	return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
	kfree(shash);
err_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
	if (!tb->cm_ops->disconnect_pcie_paths)
		return -EPERM;

	return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
	struct tb_xdomain *xd;
	struct tb *tb = data;
	int ret = 0;

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);

	return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain),
 * for example in preparation for host NVM firmware upgrade. After this
 * is called the paths cannot be established without resetting the
 * switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
	int ret;

	ret = tb_domain_disconnect_pcie_paths(tb);
	if (ret)
		return ret;

	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
	int ret;

	tb_test_init();

	tb_debugfs_init();
	ret = tb_xdomain_init();
	if (ret)
		goto err_debugfs;
	ret = bus_register(&tb_bus_type);
	if (ret)
		goto err_xdomain;

	return 0;

err_xdomain:
	tb_xdomain_exit();
err_debugfs:
	tb_debugfs_exit();
	tb_test_exit();

	return ret;
}

void tb_domain_exit(void)
{
	bus_unregister(&tb_bus_type);
	ida_destroy(&tb_domain_ida);
	tb_nvm_exit();
	tb_xdomain_exit();
	tb_debugfs_exit();
	tb_test_exit();
}