// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 */

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

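/*
 * The four-byte packet header is laid out as follows:
 *   byte 0: bits 0-2 sequence number, bits 3-5 acknowledgement number,
 *           bit 6 data integrity check (CRC) present, bit 7 reliable packet
 *   byte 1: bits 0-3 packet type, bits 4-7 low nibble of the payload length
 *   byte 2: high byte of the 12-bit payload length
 *   byte 3: header checksum
 */
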
/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

/* H5 state flags */
enum {
	H5_RX_ESC,		/* SLIP escape mode */
	H5_TX_ACK_REQ,		/* Pending ack to send */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head unack;	/* Unack'ed packets queue */
	struct sk_buff_head rel;	/* Reliable packets queue */
	struct sk_buff_head unrel;	/* Unreliable packets queue */

	unsigned long flags;

	struct sk_buff *rx_skb;		/* Receive buffer */
	size_t rx_pending;		/* Expecting more bytes */
	u8 rx_ack;			/* Last ack number received */

	int (*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list timer;	/* Retransmission timer */
	struct hci_uart *hu;		/* Parent HCI UART */

	u8 tx_seq;			/* Next seq number to send */
	u8 tx_ack;			/* Next ack number to send */
	u8 tx_win;			/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;
	const char *id;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	int (*suspend)(struct h5 *h5);
	int (*resume)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
};

static void h5_reset_rx(struct h5 *h5);

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}

static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

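	/*
	 * Move any packets still awaiting an ack back to the head of the
	 * reliable queue and rewind tx_seq so they are retransmitted with
	 * their original sequence numbers.
	 */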
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	bt_dev_err(hu->hdev, "Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

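	/*
	 * For serdev-attached devices the h5 context was allocated at probe
	 * time; for the non-serdev (tty line discipline) path it is
	 * allocated here.
	 */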
	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree_skb(h5->rx_skb);
	h5->rx_skb = NULL;

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}

static int h5_setup(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);

	return 0;
}

static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

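	/*
	 * Walk back from the next sequence number to be sent until we hit
	 * the ack we just received; everything in front of that point has
	 * been acknowledged and can be freed.
	 */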
	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

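	/*
	 * A reliable packet consumes one receive sequence number: bump
	 * tx_ack and make sure an ack gets sent back to the peer.
	 */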
	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		bt_dev_err(hu->hdev, "Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
			   H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

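	/*
	 * SLIP escaping: a 0xc0 inside the frame arrives as 0xdb 0xdc and a
	 * 0xdb as 0xdb 0xdd. Bytes come in one at a time, so the escape
	 * state is remembered across calls via the H5_RX_ESC flag.
	 */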
	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

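	/*
	 * Consume the input one byte at a time: while a packet body is
	 * pending the bytes are SLIP-decoded straight into rx_skb,
	 * otherwise the current rx_func state handler decides what to do
	 * with the byte.
	 */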
	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				bt_dev_err(hu->hdev, "Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

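	/*
	 * ACL data and commands go through the reliable (acknowledged)
	 * queue, while SCO and ISO data are sent unreliably.
	 */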
	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

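	/*
	 * Packet type goes in the low nibble of byte 1, the 12-bit payload
	 * length is split across the high nibble of byte 1 and byte 2, and
	 * byte 3 carries the one's-complement header checksum.
	 */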
	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

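	/*
	 * Reliable packets are only sent while the sliding window has room;
	 * each one sent is parked on the unack queue and the retransmission
	 * timer is (re)armed until the peer acks it.
	 */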
	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

static int h5_serdev_probe(struct serdev_device *serdev)
{
	struct device *dev = &serdev->dev;
	struct h5 *h5;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		const struct acpi_device_id *match;

		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		h5->vnd = (const struct h5_vnd *)match->driver_data;
		h5->id = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	} else {
		const void *data;

		data = of_device_get_match_data(dev);
		if (!data)
			return -ENODEV;

		h5->vnd = (const struct h5_vnd *)data;
	}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (IS_ERR(h5->enable_gpio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return PTR_ERR(h5->enable_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (IS_ERR(h5->device_wake_gpio))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return PTR_ERR(h5->device_wake_gpio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return hci_uart_register_device(&h5->serdev_hu, &h5p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static void h5_serdev_remove(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct h5 *h5 = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) hci_uart_unregister_device(&h5->serdev_hu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) static int __maybe_unused h5_serdev_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct h5 *h5 = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (h5->vnd && h5->vnd->suspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ret = h5->vnd->suspend(h5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static int __maybe_unused h5_serdev_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct h5 *h5 = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (h5->vnd && h5->vnd->resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ret = h5->vnd->resume(h5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) #ifdef CONFIG_BT_HCIUART_RTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) static int h5_btrtl_setup(struct h5 *h5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct btrtl_device_info *btrtl_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) __le32 baudrate_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) u32 device_baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) unsigned int controller_baudrate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) bool flow_control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (IS_ERR(btrtl_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return PTR_ERR(btrtl_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) &controller_baudrate, &device_baudrate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) &flow_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
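	/* Vendor-specific HCI command 0xfc17 switches the controller to the
	 * baud rate reported by btrtl_get_uart_settings() above.
	 */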
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) baudrate_data = cpu_to_le32(device_baudrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) &baudrate_data, HCI_INIT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) rtl_dev_err(h5->hu->hdev, "set baud rate command failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) err = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* Give the device some time to set up the new baudrate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) usleep_range(10000, 20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) serdev_device_set_flow_control(h5->hu->serdev, flow_control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* Give the device some time before the hci-core sends it a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) usleep_range(10000, 20000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Enable controller to do both LE scan and BR/EDR inquiry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * simultaneously.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) btrtl_free(btrtl_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static void h5_btrtl_open(struct h5 *h5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Devices always start with these fixed parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) serdev_device_set_flow_control(h5->hu->serdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) serdev_device_set_baudrate(h5->hu->serdev, 115200);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* The controller needs up to 500ms to wake up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) gpiod_set_value_cansleep(h5->enable_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static void h5_btrtl_close(struct h5 *h5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) gpiod_set_value_cansleep(h5->enable_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* Suspend/resume support. On many devices the RTL BT device loses power during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * suspend/resume, causing it to lose its firmware and all state. So we simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * are handled in the USB driver, where the USB_QUIRK_RESET_RESUME is used which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * also causes a reprobe on resume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static int h5_btrtl_suspend(struct h5 *h5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) serdev_device_set_flow_control(h5->hu->serdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) gpiod_set_value_cansleep(h5->enable_gpio, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
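/* The driver cannot unbind and rebind itself from within its own resume
 * callback, so the reprobe is deferred to a worker on system_long_wq.
 */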
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct h5_btrtl_reprobe {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct work_struct work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static void h5_btrtl_reprobe_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct h5_btrtl_reprobe *reprobe =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) container_of(work, struct h5_btrtl_reprobe, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ret = device_reprobe(reprobe->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (ret && ret != -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dev_err(reprobe->dev, "Reprobe error %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) put_device(reprobe->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) kfree(reprobe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) module_put(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) static int h5_btrtl_resume(struct h5 *h5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct h5_btrtl_reprobe *reprobe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!reprobe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
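	/* Pin the module while the reprobe work is pending; the worker
	 * releases this reference with module_put().
	 */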
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) __module_get(THIS_MODULE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) reprobe->dev = get_device(&h5->hu->serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) queue_work(system_long_wq, &reprobe->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
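/* ACPI GPIO mappings: { crs_entry_index, line_index, active_low } */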
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) { "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) { "enable-gpios", &btrtl_enable_gpios, 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) { "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static struct h5_vnd rtl_vnd = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) .setup = h5_btrtl_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) .open = h5_btrtl_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) .close = h5_btrtl_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) .suspend = h5_btrtl_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .resume = h5_btrtl_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .acpi_gpio_map = acpi_btrtl_gpios,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static const struct acpi_device_id h5_acpi_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) #ifdef CONFIG_BT_HCIUART_RTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) { "OBDA8723", (kernel_ulong_t)&rtl_vnd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) static const struct dev_pm_ops h5_serdev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static const struct of_device_id rtl_bluetooth_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #ifdef CONFIG_BT_HCIUART_RTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) { .compatible = "realtek,rtl8822cs-bt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) .data = (const void *)&rtl_vnd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) { .compatible = "realtek,rtl8723bs-bt",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) .data = (const void *)&rtl_vnd },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static struct serdev_device_driver h5_serdev_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .probe = h5_serdev_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) .remove = h5_serdev_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) .name = "hci_uart_h5",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) .acpi_match_table = ACPI_PTR(h5_acpi_match),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) .pm = &h5_serdev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .of_match_table = rtl_bluetooth_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
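/* Registers both the serdev driver (for firmware-enumerated devices) and the
 * H5 protocol with the hci_uart line discipline (for btattach/hciattach).
 */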
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) int __init h5_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) serdev_device_driver_register(&h5_serdev_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return hci_uart_register_proto(&h5p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int __exit h5_deinit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) serdev_device_driver_unregister(&h5_serdev_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return hci_uart_unregister_proto(&h5p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }