^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Thunderbolt driver - control channel and configuration commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2018, Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #ifndef _TB_CFG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define _TB_CFG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/thunderbolt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "nhi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "tb_msgs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
/* control channel */
struct tb_ctl;		/* opaque to users of this header */

/*
 * Callback invoked for incoming control channel packets that are not
 * matched to a queued request. Return value signals whether the event
 * was consumed — TODO confirm against the caller in ctl.c.
 */
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
			 const void *buf, size_t size);

/* Control channel lifecycle: allocate, start/stop RX+TX, free */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) /* configuration commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
/* Outcome of a configuration request on the control channel */
struct tb_cfg_result {
	u64 response_route;	/* route of the responding switch */
	u32 response_port; /*
			    * If err = 1 then this is the port that sent the
			    * error.
			    * If err = 0 and if this was a cfg_read/write then
			    * this is the upstream port of the responding
			    * switch.
			    * Otherwise the field is set to zero.
			    */
	int err; /* negative errors, 0 for success, 1 for tb errors */
	enum tb_cfg_error tb_error; /* valid if err == 1 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/* One control channel packet together with its NHI ring frame */
struct ctl_pkg {
	struct tb_ctl *ctl;	/* control channel this packet belongs to */
	void *buffer;		/* packet payload buffer */
	struct ring_frame frame; /* ring frame used to transfer @buffer */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/**
 * struct tb_cfg_request - Control channel request
 * @kref: Reference count
 * @ctl: Pointer to the control channel structure. Only set when the
 *	 request is queued.
 * @request: Request packet that is sent over the control channel
 * @request_size: Size of the request packet (in bytes)
 * @request_type: Type of the request packet
 * @response: Response is stored here
 * @response_size: Maximum size of one response packet
 * @response_type: Expected type of the response packet
 * @npackets: Number of packets expected to be returned with this request
 * @match: Function used to match the incoming packet
 * @copy: Function used to copy the incoming packet to @response
 * @callback: Callback called when the request is finished successfully
 * @callback_data: Data to be passed to @callback
 * @flags: Flags for the request
 * @work: Work item used to complete the request
 * @result: Result after the request has been completed
 * @list: Requests are queued using this field
 *
 * An arbitrary request over Thunderbolt control channel. For standard
 * control channel message, one should use tb_cfg_read/write() and
 * friends if possible.
 */
struct tb_cfg_request {
	struct kref kref;
	struct tb_ctl *ctl;
	const void *request;
	size_t request_size;
	enum tb_cfg_pkg_type request_type;
	void *response;
	size_t response_size;
	enum tb_cfg_pkg_type response_type;
	size_t npackets;
	bool (*match)(const struct tb_cfg_request *req,
		      const struct ctl_pkg *pkg);
	bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
	void (*callback)(void *callback_data);
	void *callback_data;
	unsigned long flags;
	struct work_struct work;
	struct tb_cfg_result result;
	struct list_head list;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
/* Bit numbers for tb_cfg_request.flags — presumably used with
 * test_bit()/set_bit(); confirm against ctl.c */
#define TB_CFG_REQUEST_ACTIVE 0
#define TB_CFG_REQUEST_CANCELED 1

/* Request lifecycle: allocation and reference counting */
struct tb_cfg_request *tb_cfg_request_alloc(void);
void tb_cfg_request_get(struct tb_cfg_request *req);
void tb_cfg_request_put(struct tb_cfg_request *req);
/* Queue an asynchronous request; @callback runs on completion */
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
		   void (*callback)(void *), void *callback_data);
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
/* Synchronous variant: waits up to @timeout_msec for the result */
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
					 struct tb_cfg_request *req, int timeout_msec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) return (u64) header->route_hi << 32 | header->route_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct tb_cfg_header header = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) .route_hi = route >> 32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) .route_lo = route,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) /* check for overflow, route_hi is not 32 bits! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) WARN_ON(tb_cfg_get_route(&header) != route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) return header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
/* Acknowledge a hot plug/unplug event for @port on the switch at @route */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
/* Reset the switch at @route, waiting up to @timeout_msec */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec);
/*
 * Raw config space read/write: full tb_cfg_result returned so callers
 * can inspect per-port error details (see struct tb_cfg_result)
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
				     u64 route, u32 port,
				     enum tb_cfg_space space, u32 offset,
				     u32 length, int timeout_msec);
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
				      u64 route, u32 port,
				      enum tb_cfg_space space, u32 offset,
				      u32 length, int timeout_msec);
/* Convenience wrappers returning a plain error code instead of a result */
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length);
/* Returns the upstream port number of the switch at @route, or negative errno */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) #endif