// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>

#include "ctl.h"


#define TB_CTL_RX_PKG_COUNT	10
#define TB_CTL_RETRIES		4

/**
 * struct tb_ctl - Thunderbolt control channel
 * @nhi: Pointer to the NHI structure
 * @tx: Transmit ring
 * @rx: Receive ring
 * @frame_pool: DMA pool for control messages
 * @rx_packets: Received control messages
 * @request_queue_lock: Lock protecting @request_queue
 * @request_queue: List of outstanding requests
 * @running: Is the control channel running at the moment
 * @callback: Callback called when hotplug message is received
 * @callback_data: Data passed to @callback
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	struct mutex request_queue_lock;
	struct list_head request_queue;
	bool running;

	event_cb callback;
	void *callback_data;
};


#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_dbg(ctl, format, arg...) \
	dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)

static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);

/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is a refcounted object, so when you are done with it, call
 * tb_cfg_request_put() to release it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	kref_init(&req->kref);

	return req;
}

/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}

static void tb_cfg_request_destroy(struct kref *kref)
{
	struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);

	kfree(req);
}

/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}

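/*
 * Queue @req on the control channel. Fails with -ENOTCONN if the
 * channel is not running (e.g. it has been stopped), which keeps new
 * requests from racing with tb_ctl_stop().
 */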
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
				  struct tb_cfg_request *req)
{
	WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
	WARN_ON(req->ctl);

	mutex_lock(&ctl->request_queue_lock);
	if (!ctl->running) {
		mutex_unlock(&ctl->request_queue_lock);
		return -ENOTCONN;
	}
	req->ctl = ctl;
	list_add_tail(&req->list, &ctl->request_queue);
	set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	mutex_unlock(&ctl->request_queue_lock);
	return 0;
}

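/*
 * Remove @req from the request queue and clear its active bit. Wakes
 * up anyone waiting in tb_cfg_request_cancel() for the request to
 * become inactive.
 */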
static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
	struct tb_ctl *ctl = req->ctl;

	mutex_lock(&ctl->request_queue_lock);
	list_del(&req->list);
	clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
	if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		wake_up(&tb_cfg_request_cancel_queue);
	mutex_unlock(&ctl->request_queue_lock);
}

static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
	return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}

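/*
 * Find the request matching the received packet. Returns the request
 * with an extra reference held (the caller must drop it with
 * tb_cfg_request_put()) or NULL if no active request matches.
 */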
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
	struct tb_cfg_request *req;
	bool found = false;

	mutex_lock(&pkg->ctl->request_queue_lock);
	list_for_each_entry(req, &pkg->ctl->request_queue, list) {
		tb_cfg_request_get(req);
		if (req->match(req, pkg)) {
			found = true;
			break;
		}
		tb_cfg_request_put(req);
	}
	mutex_unlock(&pkg->ctl->request_queue_lock);

	return found ? req : NULL;
}

/* utility functions */


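/*
 * Validate the ring frame metadata (size, SOF/EOF descriptors) and the
 * config header (reserved bits, route) of a received packet against
 * what was expected. Returns 0 if the packet looks sane, -EIO otherwise.
 */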
static int check_header(const struct ctl_pkg *pkg, u32 len,
			enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
		 "wrong framesize (expected %#x, got %#x)\n",
		 len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
		 type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
		 pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
		 "header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != tb_cfg_get_route(header),
		 "wrong route (expected %llx, got %llx)",
		 route, tb_cfg_get_route(header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

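/*
 * Decode a TB_CFG_PKG_ERROR packet into a tb_cfg_result. On success
 * res.err is set to 1 and the Thunderbolt error code is stored in
 * res.tb_error, so callers can tell protocol errors apart from
 * transport errors (negative res.err).
 */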
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_ctl *ctl = response->ctl;
	struct tb_cfg_result res = { 0 };

	res.response_route = tb_cfg_get_route(&pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       tb_cfg_get_route(&pkg->header));
	if (res.err)
		return res;

	if (pkg->zero1)
		tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
	if (pkg->zero2)
		tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
	if (pkg->zero3)
		tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);

	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

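/*
 * Parse the header of a received packet. Error packets are routed to
 * decode_error(); everything else is validated with check_header() and
 * the result (route, port, error status) is returned to the caller.
 */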
static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = tb_cfg_get_route(header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
			   res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			    res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOCK:
		tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
			    res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			    res->response_route, res->response_port);
		return;
	}
}

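/*
 * Compute the CRC32c checksum of @data and return it in big-endian
 * byte order, matching the checksum carried in the last dword of each
 * control channel packet.
 */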
static __be32 tb_crc(const void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);

	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}


/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;

	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = tb_ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}

/**
 * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
 */
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
				struct ctl_pkg *pkg, size_t size)
{
	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
						* We ignore failures during stop.
						* All rx packets are referenced
						* from ctl->rx_packets, so we do
						* not lose them.
						*/
}

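/*
 * Return true if the packet is an error notification the router sent
 * on its own (link, HEC or flow control errors) rather than a reply to
 * one of our requests. Such packets are forwarded to ctl->callback.
 */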
static bool tb_async_error(const struct ctl_pkg *pkg)
{
	const struct cfg_error_pkg *error = pkg->buffer;

	if (pkg->frame.eof != TB_CFG_PKG_ERROR)
		return false;

	switch (error->error) {
	case TB_CFG_ERROR_LINK_ERROR:
	case TB_CFG_ERROR_HEC_ERROR_DETECTED:
	case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
		return true;

	default:
		return false;
	}
}

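/*
 * Called for every received frame: verify the checksum, convert the
 * payload back to CPU byte order, hand events and async errors to
 * ctl->callback and match replies against the active request queue.
 * The packet buffer is resubmitted to the RX ring in all cases.
 */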
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	struct tb_cfg_request *req;
	__be32 crc32;

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	crc32 = tb_crc(pkg->buffer, frame->size);
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	switch (frame->eof) {
	case TB_CFG_PKG_READ:
	case TB_CFG_PKG_WRITE:
	case TB_CFG_PKG_ERROR:
	case TB_CFG_PKG_OVERRIDE:
	case TB_CFG_PKG_RESET:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		if (tb_async_error(pkg)) {
			tb_ctl_handle_event(pkg->ctl, frame->eof,
					    pkg, frame->size);
			goto rx;
		}
		break;

	case TB_CFG_PKG_EVENT:
	case TB_CFG_PKG_XDOMAIN_RESP:
	case TB_CFG_PKG_XDOMAIN_REQ:
		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
			tb_ctl_err(pkg->ctl,
				   "RX: checksum mismatch, dropping packet\n");
			goto rx;
		}
		fallthrough;
	case TB_CFG_PKG_ICM_EVENT:
		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
			goto rx;
		break;

	default:
		break;
	}

	/*
	 * The received packet will be processed only if there is an
	 * active request and the packet is what is expected. This
	 * prevents packets such as replies coming after timeout has
	 * triggered from messing with the active requests.
	 */
	req = tb_cfg_request_find(pkg->ctl, pkg);
	if (req) {
		if (req->copy(req, pkg))
			schedule_work(&req->work);
		tb_cfg_request_put(req);
	}

rx:
	tb_ctl_rx_submit(pkg);
}

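/*
 * Worker that completes a request outside of the RX callback. Invokes
 * the completion callback (unless the request was canceled), dequeues
 * the request and drops the reference taken in tb_cfg_request().
 */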
static void tb_cfg_request_work(struct work_struct *work)
{
	struct tb_cfg_request *req = container_of(work, typeof(*req), work);

	if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
		req->callback(req->callback_data);

	tb_cfg_request_dequeue(req);
	tb_cfg_request_put(req);
}

/**
 * tb_cfg_request() - Start control request without waiting for it to complete
 * @ctl: Control channel to use
 * @req: Request to start
 * @callback: Callback called when the request is completed
 * @callback_data: Data to be passed to @callback
 *
 * This queues @req on the given control channel without waiting for it
 * to complete. When the request completes @callback is called.
 */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data)
{
	int ret;

	req->flags = 0;
	req->callback = callback;
	req->callback_data = callback_data;
	INIT_WORK(&req->work, tb_cfg_request_work);
	INIT_LIST_HEAD(&req->list);

	tb_cfg_request_get(req);
	ret = tb_cfg_request_enqueue(ctl, req);
	if (ret)
		goto err_put;

	ret = tb_ctl_tx(ctl, req->request, req->request_size,
			req->request_type);
	if (ret)
		goto err_dequeue;

	if (!req->response)
		schedule_work(&req->work);

	return 0;

err_dequeue:
	tb_cfg_request_dequeue(req);
err_put:
	tb_cfg_request_put(req);

	return ret;
}

/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel an ongoing request. It will wait
 * until the request is no longer active.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}

static void tb_cfg_request_complete(void *data)
{
	complete(data);
}

/**
 * tb_cfg_request_sync() - Start control request and wait until it completes
 * @ctl: Control channel to use
 * @req: Request to start
 * @timeout_msec: Timeout in milliseconds to wait for @req to complete
 *
 * Starts a control request and waits until it completes. If the timeout
 * triggers, the request is canceled before the function returns. Note the
 * caller needs to make sure only one message for given switch is active
 * at a time.
 */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req,
					 int timeout_msec)
{
	unsigned long timeout = msecs_to_jiffies(timeout_msec);
	struct tb_cfg_result res = { 0 };
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
	if (ret) {
		res.err = ret;
		return res;
	}

	if (!wait_for_completion_timeout(&done, timeout))
		tb_cfg_request_cancel(req, -ETIMEDOUT);

	flush_work(&req->work);

	return req->result;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 * @nhi: Pointer to NHI
 * @cb: Callback called for plug events
 * @cb_data: Data passed to @cb
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	mutex_init(&ctl->request_queue_lock);
	INIT_LIST_HEAD(&ctl->request_queue);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
	if (!ctl->tx)
		goto err;

	ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
				   0xffff, NULL, NULL);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_dbg(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop().
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;

	if (!ctl)
		return;

	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	tb_ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);

	ctl->running = true;
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}

/* public interface, commands */

/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this in response to a hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
		.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
			     : TB_CFG_ERROR_PG_HOT_PLUG,
	};

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

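/*
 * Decide whether a received packet is the reply to @req: error packets
 * always match, otherwise the packet type, route (with the topmost
 * route bit masked off) and size must match, and for read/write
 * replies the sequence number must match as well.
 */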
static bool tb_cfg_match(const struct tb_cfg_request *req,
			 const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;

	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	if (pkg->frame.eof == TB_CFG_PKG_READ ||
	    pkg->frame.eof == TB_CFG_PKG_WRITE) {
		const struct cfg_read_pkg *req_hdr = req->request;
		const struct cfg_read_pkg *res_hdr = pkg->buffer;

		if (req_hdr->addr.seq != res_hdr->addr.seq)
			return false;
	}

	return true;
}

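/*
 * Copy the reply into the buffer provided by the request once the
 * header has been validated. Returning true tells the RX path to
 * complete the request.
 */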
static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	struct tb_cfg_result res;

	/* Now make sure it is in expected format */
	res = parse_header(pkg, req->response_size, req->response_type,
			   tb_cfg_get_route(req->request));
	if (!res.err)
		memcpy(req->response, pkg->buffer, req->response_size);

	req->result = res;

	/* Always complete when first response is received */
	return true;
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel to use
 * @route: Route string of the router to reset
 * @timeout_msec: Timeout in milliseconds to wait for the response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) int timeout_msec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) struct tb_cfg_result res = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct tb_cfg_header reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) struct tb_cfg_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) req = tb_cfg_request_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) res.err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) req->match = tb_cfg_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) req->copy = tb_cfg_copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) req->request = &request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) req->request_size = sizeof(request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) req->request_type = TB_CFG_PKG_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) req->response = &reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) req->response_size = sizeof(reply);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) req->response_type = TB_CFG_PKG_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) res = tb_cfg_request_sync(ctl, req, timeout_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) tb_cfg_request_put(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
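/*
 * Illustrative sketch only (not part of the original file): how a caller
 * might use tb_cfg_reset() and honor the -ETIMEDOUT contract documented
 * above. tb_example_reconfigure() is a hypothetical helper standing in
 * for whatever reconfiguration the caller performs.
 */
#if 0
static int tb_example_reset(struct tb_ctl *ctl, u64 route)
{
	struct tb_cfg_result res;

	res = tb_cfg_reset(ctl, route, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == -ETIMEDOUT) {
		/*
		 * The switch has reset but could not reply because it is
		 * not configured; set it up again from scratch.
		 */
		return tb_example_reconfigure(ctl, route);
	}
	return res.err;
}
#endif
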
/**
 * tb_cfg_read_raw() - read from config space into buffer
 * @ctl: Pointer to the control channel
 * @buffer: Buffer where the data is read into
 * @route: Route string of the router
 * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
 * @space: Config space to read from
 * @offset: Offset where to start reading (in dwords)
 * @length: Number of dwords to read
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Reads from the router config space. Offset and length are in dwords.
 * Unlike tb_cfg_read(), a possible Thunderbolt error is returned in the
 * result and not translated into an errno.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;	/* read replies carry data, like writes */
	int retries = 0;

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		req->request_size = sizeof(request);
		req->request_type = TB_CFG_PKG_READ;
		req->response = &reply;
		/* Reply is header + addr (12 bytes) followed by the data dwords */
		req->response_size = 12 + 4 * length;
		req->response_type = TB_CFG_PKG_READ;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) before sending a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

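/*
 * Illustrative sketch only: reading a single dword with the raw API. The
 * caller gets the untranslated tb_cfg_result back, so it can inspect
 * res.err == 1 (Thunderbolt error) and res.tb_error itself if it wants
 * finer-grained handling than the tb_cfg_read() wrapper provides.
 */
#if 0
static int tb_example_read_dword(struct tb_ctl *ctl, u64 route, u32 *value)
{
	struct tb_cfg_result res;

	/* Port 0, offset 0, one dword from the switch config space */
	res = tb_cfg_read_raw(ctl, value, route, 0, TB_CFG_SWITCH, 0, 1,
			      TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;	/* res.tb_error holds the details */
	return res.err;
}
#endif
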
/**
 * tb_cfg_write_raw() - write from buffer into config space
 * @ctl: Pointer to the control channel
 * @buffer: Data to write
 * @route: Route string of the router
 * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
 * @space: Config space to write to
 * @offset: Offset where to start writing (in dwords)
 * @length: Number of dwords to write
 * @timeout_msec: Timeout in ms how long to wait for the response
 *
 * Writes to the router config space. Offset and length are in dwords.
 * Unlike tb_cfg_write(), a possible Thunderbolt error is returned in the
 * result and not translated into an errno.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;	/* write replies carry no data */
	int retries = 0;

	memcpy(&request.data, buffer, length * 4);

	while (retries < TB_CTL_RETRIES) {
		struct tb_cfg_request *req;

		req = tb_cfg_request_alloc();
		if (!req) {
			res.err = -ENOMEM;
			return res;
		}

		request.addr.seq = retries++;

		req->match = tb_cfg_match;
		req->copy = tb_cfg_copy;
		req->request = &request;
		/* Request is header + addr (12 bytes) followed by the data dwords */
		req->request_size = 12 + 4 * length;
		req->request_type = TB_CFG_PKG_WRITE;
		req->response = &reply;
		req->response_size = sizeof(reply);
		req->response_type = TB_CFG_PKG_WRITE;

		res = tb_cfg_request_sync(ctl, req, timeout_msec);

		tb_cfg_request_put(req);

		if (res.err != -ETIMEDOUT)
			break;

		/* Wait a bit (arbitrary time) before sending a retry */
		usleep_range(10, 100);
	}

	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

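/*
 * Illustrative sketch only: writing several dwords with the raw API. The
 * payload is copied into the request before the retry loop above, so every
 * retry carries the same data; only addr.seq differs between attempts.
 */
#if 0
static int tb_example_write_dwords(struct tb_ctl *ctl, u64 route, u32 port,
				   u32 offset, const u32 *data, u32 ndwords)
{
	struct tb_cfg_result res;

	res = tb_cfg_write_raw(ctl, data, route, port, TB_CFG_PORT, offset,
			       ndwords, TB_CFG_DEFAULT_TIMEOUT);
	return res.err == 1 ? -EIO : res.err;
}
#endif
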
static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
			    const struct tb_cfg_result *res)
{
	/*
	 * For unimplemented ports, access to the port config space may
	 * return TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their
	 * type is set to TB_TYPE_INACTIVE). In the former case return
	 * -ENODEV so that the caller can mark the port as disabled.
	 */
	if (space == TB_CFG_PORT &&
	    res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
		return -ENODEV;

	tb_cfg_print_error(ctl, res);

	if (res->tb_error == TB_CFG_ERROR_LOCK)
		return -EACCES;
	return -EIO;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_read: %d\n", res.err);
		break;
	}
	return res.err;
}

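/*
 * Illustrative sketch only: because this wrapper translates Thunderbolt
 * errors into errnos, a caller can probe a port and treat -ENODEV as
 * "port not implemented" (see tb_cfg_get_error() above).
 */
#if 0
static bool tb_example_port_implemented(struct tb_ctl *ctl, u64 route,
					u32 port)
{
	u32 dummy;
	int ret;

	ret = tb_cfg_read(ctl, &dummy, route, port, TB_CFG_PORT, 0, 1);
	return ret != -ENODEV;
}
#endif
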
int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	switch (res.err) {
	case 0:
		/* Success */
		break;

	case 1:
		/* Thunderbolt error, tb_error holds the actual number */
		return tb_cfg_get_error(ctl, space, &res);

	case -ETIMEDOUT:
		tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
			    route, space, offset);
		break;

	default:
		WARN(1, "tb_cfg_write: %d\n", res.err);
		break;
	}
	return res.err;
}

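/*
 * Illustrative sketch only: a read-modify-write of one config dword using
 * the errno-returning wrappers. The bit position is a made-up example
 * parameter, not a real register field.
 */
#if 0
static int tb_example_set_bit(struct tb_ctl *ctl, u64 route, u32 port,
			      u32 offset, u32 bit)
{
	u32 val;
	int ret;

	ret = tb_cfg_read(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
	if (ret)
		return ret;

	val |= BIT(bit);
	return tb_cfg_write(ctl, &val, route, port, TB_CFG_PORT, offset, 1);
}
#endif
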
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the switch
 *
 * Reads the first dword from the switch's %TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Upstream port number on success or an error code on failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}
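
/*
 * Illustrative sketch only: during enumeration a caller could ask the
 * switch at @route which of its ports points towards the host.
 */
#if 0
static void tb_example_log_upstream(struct tb_ctl *ctl, u64 route)
{
	int port = tb_cfg_get_upstream_port(ctl, route);

	if (port < 0)
		tb_ctl_warn(ctl, "%llx: no usable upstream port: %d\n",
			    route, port);
	else
		tb_ctl_dbg(ctl, "%llx: upstream port is %d\n", route, port);
}
#endif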