Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus connections
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT	1000

static void gb_connection_kref_release(struct kref *kref);

static DEFINE_SPINLOCK(gb_connections_lock);
static DEFINE_MUTEX(gb_connection_mutex);

/* Caller holds gb_connection_mutex. */
static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
{
	struct gb_host_device *hd = intf->hd;
	struct gb_connection *connection;

	list_for_each_entry(connection, &hd->connections, hd_links) {
		if (connection->intf == intf &&
		    connection->intf_cport_id == cport_id)
			return true;
	}

	return false;
}

static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}

static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}

/*
 * Returns a reference-counted pointer to the connection if found.
 */
static struct gb_connection *
gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
{
	struct gb_connection *connection;
	unsigned long flags;

	spin_lock_irqsave(&gb_connections_lock, flags);
	list_for_each_entry(connection, &hd->connections, hd_links)
		if (connection->hd_cport_id == cport_id) {
			gb_connection_get(connection);
			goto found;
		}
	connection = NULL;
found:
	spin_unlock_irqrestore(&gb_connections_lock, flags);

	return connection;
}

/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);

static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}

static void gb_connection_init_name(struct gb_connection *connection)
{
	u16 hd_cport_id = connection->hd_cport_id;
	u16 cport_id = 0;
	u8 intf_id = 0;

	if (connection->intf) {
		intf_id = connection->intf->interface_id;
		cport_id = connection->intf_cport_id;
	}

	snprintf(connection->name, sizeof(connection->name),
		 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
}
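
/*
 * Illustration (not from the original source): the name encodes
 * "<hd_cport_id>/<intf_id>:<intf_cport_id>", so a connection on host
 * cport 3 to cport 5 of interface 1 is named "3/1:5", while a static
 * connection (no remote interface) on host cport 0 is named "0/0:0".
 * These example numbers are arbitrary.
 */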

/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}

struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}

struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);

struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);

struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
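
/*
 * Usage sketch (illustration only, not part of the original file): a bundle
 * driver typically creates and enables a connection from its probe()
 * callback, checking the ERR_PTR-encoded return value of the create helper
 * and the integer return of the enable path. The gb_example_* names below
 * are hypothetical; everything else uses the exported API of this file.
 *
 *	static int gb_example_probe(struct gb_bundle *bundle,
 *				    const struct greybus_bundle_id *id)
 *	{
 *		struct gb_connection *connection;
 *		int ret;
 *
 *		connection = gb_connection_create(bundle,
 *					le16_to_cpu(bundle->cports[0].id),
 *					gb_example_request_handler);
 *		if (IS_ERR(connection))
 *			return PTR_ERR(connection);
 *
 *		ret = gb_connection_enable(connection);
 *		if (ret) {
 *			gb_connection_destroy(connection);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */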
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static int gb_connection_hd_cport_enable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 	if (!hd->driver->cport_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 				       connection->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) static void gb_connection_hd_cport_disable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	if (!hd->driver->cport_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 	ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 		dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) static int gb_connection_hd_cport_connected(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	if (!hd->driver->cport_connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) static int gb_connection_hd_cport_flush(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 	if (!hd->driver->cport_flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 	size_t peer_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 	if (!hd->driver->cport_quiesce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	peer_space = sizeof(struct gb_operation_msg_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 			sizeof(struct gb_cport_shutdown_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	if (connection->mode_switch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		peer_space += sizeof(struct gb_operation_msg_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 					peer_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static int gb_connection_hd_cport_clear(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	if (!hd->driver->cport_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)  * Request the SVC to create a connection from AP's cport to interface's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)  * cport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) gb_connection_svc_connection_create(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 	struct gb_interface *intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	u8 cport_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	intf = connection->intf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	 * Enable either E2EFC or CSD, unless no flow control is requested.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 	if (gb_connection_flow_control_disabled(connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 	} else if (gb_connection_e2efc_enabled(connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 				GB_SVC_CPORT_FLAG_E2EFC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	ret = gb_svc_connection_create(hd->svc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 				       hd->svc->ap_intf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 				       connection->hd_cport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 				       intf->interface_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 				       connection->intf_cport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 				       cport_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 		dev_err(&connection->hd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 			"%s: failed to create svc connection: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) gb_connection_svc_connection_destroy(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	gb_svc_connection_destroy(connection->hd->svc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 				  connection->hd->svc->ap_intf_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 				  connection->hd_cport_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 				  connection->intf->interface_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 				  connection->intf_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /* Inform Interface about active CPorts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) static int gb_connection_control_connected(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 	struct gb_control *control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	u16 cport_id = connection->intf_cport_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	if (gb_connection_is_control(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	control = connection->intf->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 	ret = gb_control_connected_operation(control, cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		dev_err(&connection->bundle->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 			"failed to connect cport: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) gb_connection_control_disconnecting(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 	struct gb_control *control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	u16 cport_id = connection->intf_cport_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	control = connection->intf->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	ret = gb_control_disconnecting_operation(control, cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		dev_err(&connection->hd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 			"%s: failed to send disconnecting: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) gb_connection_control_disconnected(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	struct gb_control *control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	u16 cport_id = connection->intf_cport_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	control = connection->intf->control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	if (gb_connection_is_control(connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		if (connection->mode_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 			ret = gb_control_mode_switch_operation(control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 				 * Allow mode switch to time out waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 				 * mailbox event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	ret = gb_control_disconnected_operation(control, cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		dev_warn(&connection->bundle->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 			 "failed to disconnect cport: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) static int gb_connection_shutdown_operation(struct gb_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 					    u8 phase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	struct gb_cport_shutdown_request *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	struct gb_operation *operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	operation = gb_operation_create_core(connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 					     sizeof(*req), 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 					     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	if (!operation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	req = operation->request->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	req->phase = phase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	ret = gb_operation_request_send_sync(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 	gb_operation_put(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static int gb_connection_cport_shutdown(struct gb_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 					u8 phase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	const struct gb_hd_driver *drv = hd->driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	if (gb_connection_is_static(connection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	if (gb_connection_is_offloaded(connection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		if (!drv->cport_shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 					  GB_OPERATION_TIMEOUT_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		ret = gb_connection_shutdown_operation(connection, phase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 			connection->name, phase, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 	return gb_connection_cport_shutdown(connection, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	return gb_connection_cport_shutdown(connection, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)  * Cancel all active operations on a connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)  * Locking: Called with connection lock held and state set to DISABLED or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)  * DISCONNECTING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) static void gb_connection_cancel_operations(struct gb_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 					    int errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	__must_hold(&connection->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	struct gb_operation *operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	while (!list_empty(&connection->operations)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 		operation = list_last_entry(&connection->operations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 					    struct gb_operation, links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 		gb_operation_get(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 		if (gb_operation_is_incoming(operation))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 			gb_operation_cancel_incoming(operation, errno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 			gb_operation_cancel(operation, errno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		gb_operation_put(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)  * Cancel all active incoming operations on a connection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627)  * Locking: Called with connection lock held and state set to ENABLED_TX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) gb_connection_flush_incoming_operations(struct gb_connection *connection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 					int errno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	__must_hold(&connection->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	struct gb_operation *operation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	bool incoming;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 	while (!list_empty(&connection->operations)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 		incoming = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		list_for_each_entry(operation, &connection->operations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 				    links) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 			if (gb_operation_is_incoming(operation)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 				gb_operation_get(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 				incoming = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 		if (!incoming)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		/* FIXME: flush, not cancel? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		gb_operation_cancel_incoming(operation, errno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 		gb_operation_put(operation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 		spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)  * _gb_connection_enable() - enable a connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)  * @connection:		connection to enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664)  * @rx:			whether to enable incoming requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)  * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)  * ENABLED_TX->ENABLED state transitions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)  * Locking: Caller holds connection->mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static int _gb_connection_enable(struct gb_connection *connection, bool rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	/* Handle ENABLED_TX -> ENABLED transitions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		if (!(connection->handler && rx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 		spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 		connection->state = GB_CONNECTION_STATE_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	ret = gb_connection_hd_cport_enable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	ret = gb_connection_svc_connection_create(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		goto err_hd_cport_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	ret = gb_connection_hd_cport_connected(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 		goto err_svc_connection_destroy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	if (connection->handler && rx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 		connection->state = GB_CONNECTION_STATE_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	ret = gb_connection_control_connected(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 		goto err_control_disconnecting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) err_control_disconnecting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	gb_connection_cancel_operations(connection, -ESHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 	/* Transmit queue should already be empty. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	gb_connection_hd_cport_flush(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	gb_connection_control_disconnecting(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	gb_connection_cport_shutdown_phase_1(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	gb_connection_hd_cport_quiesce(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 	gb_connection_cport_shutdown_phase_2(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	gb_connection_control_disconnected(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 	connection->state = GB_CONNECTION_STATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) err_svc_connection_destroy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	gb_connection_svc_connection_destroy(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) err_hd_cport_clear:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 	gb_connection_hd_cport_clear(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	gb_connection_hd_cport_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) int gb_connection_enable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	mutex_lock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	if (connection->state == GB_CONNECTION_STATE_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	ret = _gb_connection_enable(connection, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 		trace_gb_connection_enable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	mutex_unlock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) EXPORT_SYMBOL_GPL(gb_connection_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) int gb_connection_enable_tx(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	mutex_lock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	if (connection->state == GB_CONNECTION_STATE_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	ret = _gb_connection_enable(connection, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		trace_gb_connection_enable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	mutex_unlock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) void gb_connection_disable_rx(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	mutex_lock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 	spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	trace_gb_connection_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	mutex_unlock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) void gb_connection_mode_switch_prepare(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	connection->mode_switch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) void gb_connection_mode_switch_complete(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 	gb_connection_svc_connection_destroy(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	gb_connection_hd_cport_clear(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	gb_connection_hd_cport_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 	connection->mode_switch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 
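/*
 * Disable a connection: cancel outstanding operations, signal the remote end
 * over the control connection, quiesce and shut down the cport, and tear down
 * the SVC connection and host cport unless a mode switch is in progress.
 */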
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) void gb_connection_disable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	mutex_lock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	if (connection->state == GB_CONNECTION_STATE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	trace_gb_connection_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 	spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	gb_connection_cancel_operations(connection, -ESHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 	spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	gb_connection_hd_cport_flush(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	gb_connection_control_disconnecting(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	gb_connection_cport_shutdown_phase_1(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	gb_connection_hd_cport_quiesce(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	gb_connection_cport_shutdown_phase_2(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	gb_connection_control_disconnected(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	connection->state = GB_CONNECTION_STATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	/* control-connection teardown is deferred when mode switching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	if (!connection->mode_switch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 		gb_connection_svc_connection_destroy(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 		gb_connection_hd_cport_clear(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 		gb_connection_hd_cport_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	mutex_unlock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) EXPORT_SYMBOL_GPL(gb_connection_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Disable a connection without communicating with the remote end. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) void gb_connection_disable_forced(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	mutex_lock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	if (connection->state == GB_CONNECTION_STATE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	trace_gb_connection_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	spin_lock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	connection->state = GB_CONNECTION_STATE_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	gb_connection_cancel_operations(connection, -ESHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	spin_unlock_irq(&connection->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	gb_connection_hd_cport_flush(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	gb_connection_svc_connection_destroy(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 	gb_connection_hd_cport_clear(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	gb_connection_hd_cport_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	mutex_unlock(&connection->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /* Caller must have disabled the connection before destroying it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) void gb_connection_destroy(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	if (!connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 		gb_connection_disable(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 	mutex_lock(&gb_connection_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	spin_lock_irq(&gb_connections_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	list_del(&connection->bundle_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	list_del(&connection->hd_links);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 	spin_unlock_irq(&gb_connections_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	destroy_workqueue(connection->wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 	connection->hd_cport_id = CPORT_ID_BAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	mutex_unlock(&gb_connection_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 	gb_connection_put(connection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) EXPORT_SYMBOL_GPL(gb_connection_destroy);
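
/*
 * Illustrative life-cycle sketch (not taken from an in-tree driver; the
 * request handler name is a placeholder): a bundle driver must disable a
 * connection before destroying it, e.g.
 *
 *	connection = gb_connection_create(bundle, cport_id, request_handler);
 *	if (IS_ERR(connection))
 *		return PTR_ERR(connection);
 *	ret = gb_connection_enable(connection);
 *	...
 *	gb_connection_disable(connection);
 *	gb_connection_destroy(connection);
 */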
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 
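/*
 * Ask the host-device driver to start tagging messages on this connection's
 * cport for latency tracking; a no-op if the driver does not implement the
 * hook.
 */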
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) void gb_connection_latency_tag_enable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	if (!hd->driver->latency_tag_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 	ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 		dev_err(&connection->hd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 			"%s: failed to enable latency tag: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 
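/*
 * Ask the host-device driver to stop tagging messages on this connection's
 * cport; a no-op if the driver does not implement the hook.
 */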
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) void gb_connection_latency_tag_disable(struct gb_connection *connection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 	struct gb_host_device *hd = connection->hd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	if (!hd->driver->latency_tag_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 	ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 		dev_err(&connection->hd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 			"%s: failed to disable latency tag: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 			connection->name, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);