Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Texas Instruments' Bluetooth HCILL UART protocol
 *
 *  HCILL (HCI Low Level) is a Texas Instruments' power management
 *  protocol extension to H4.
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *
 *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
 *
 *  Acknowledgements:
 *  This file is based on hci_h4.c, which was written
 *  by Maxim Krasnyansky and Marcel Holtmann.
 */

#include <linux/module.h>
#include <linux/kernel.h>

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/of.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>
#include <linux/ti_wilink_st.h>
#include <linux/clk.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <linux/gpio/consumer.h>
#include <linux/nvmem-consumer.h>

#include "hci_uart.h"

/* Vendor-specific HCI commands */
#define HCI_VS_WRITE_BD_ADDR			0xfc06
#define HCI_VS_UPDATE_UART_HCI_BAUDRATE		0xff36

/* HCILL commands */
#define HCILL_GO_TO_SLEEP_IND	0x30
#define HCILL_GO_TO_SLEEP_ACK	0x31
#define HCILL_WAKE_UP_IND	0x32
#define HCILL_WAKE_UP_ACK	0x33

/* HCILL states */
enum hcill_states_e {
	HCILL_ASLEEP,
	HCILL_ASLEEP_TO_AWAKE,
	HCILL_AWAKE,
	HCILL_AWAKE_TO_ASLEEP
};
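
/*
 * State transitions handled by this driver (either side may initiate):
 *   HCILL_AWAKE  --GO_TO_SLEEP_IND from chip--> send GO_TO_SLEEP_ACK --> HCILL_ASLEEP
 *   HCILL_ASLEEP --host has data to send------> send WAKE_UP_IND     --> HCILL_ASLEEP_TO_AWAKE
 *   HCILL_ASLEEP_TO_AWAKE --WAKE_UP_ACK (or WAKE_UP_IND) from chip--> HCILL_AWAKE
 *   HCILL_ASLEEP --WAKE_UP_IND from chip------> send WAKE_UP_ACK     --> HCILL_AWAKE
 * HCILL_AWAKE_TO_ASLEEP is defined for completeness but is never entered
 * by the code below.
 */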

struct ll_device {
	struct hci_uart hu;
	struct serdev_device *serdev;
	struct gpio_desc *enable_gpio;
	struct clk *ext_clk;
	bdaddr_t bdaddr;
};

struct ll_struct {
	struct sk_buff *rx_skb;
	struct sk_buff_head txq;
	spinlock_t hcill_lock;		/* HCILL state lock	*/
	unsigned long hcill_state;	/* HCILL power state	*/
	struct sk_buff_head tx_wait_q;	/* HCILL wait queue	*/
};

/*
 * Builds and sends an HCILL command packet.
 * These are very simple packets with only 1 cmd byte
 */
static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
{
	int err = 0;
	struct sk_buff *skb = NULL;
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p cmd 0x%x", hu, cmd);

	/* allocate packet */
	skb = bt_skb_alloc(1, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("cannot allocate memory for HCILL packet");
		err = -ENOMEM;
		goto out;
	}

	/* prepare packet */
	skb_put_u8(skb, cmd);

	/* send packet */
	skb_queue_tail(&ll->txq, skb);
out:
	return err;
}

/* Initialize protocol */
static int ll_open(struct hci_uart *hu)
{
	struct ll_struct *ll;

	BT_DBG("hu %p", hu);

	ll = kzalloc(sizeof(*ll), GFP_KERNEL);
	if (!ll)
		return -ENOMEM;

	skb_queue_head_init(&ll->txq);
	skb_queue_head_init(&ll->tx_wait_q);
	spin_lock_init(&ll->hcill_lock);

	ll->hcill_state = HCILL_AWAKE;

	hu->priv = ll;

	if (hu->serdev) {
		struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);

		if (!IS_ERR(lldev->ext_clk))
			clk_prepare_enable(lldev->ext_clk);
	}

	return 0;
}

/* Flush protocol data */
static int ll_flush(struct hci_uart *hu)
{
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p", hu);

	skb_queue_purge(&ll->tx_wait_q);
	skb_queue_purge(&ll->txq);

	return 0;
}

/* Close protocol */
static int ll_close(struct hci_uart *hu)
{
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p", hu);

	skb_queue_purge(&ll->tx_wait_q);
	skb_queue_purge(&ll->txq);

	kfree_skb(ll->rx_skb);

	if (hu->serdev) {
		struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev);

		gpiod_set_value_cansleep(lldev->enable_gpio, 0);

		clk_disable_unprepare(lldev->ext_clk);
	}

	hu->priv = NULL;

	kfree(ll);

	return 0;
}

/*
 * internal function, which does common work of the device wake up process:
 * 1. places all pending packets (waiting in tx_wait_q list) in txq list.
 * 2. changes internal state to HCILL_AWAKE.
 * Note: assumes that hcill_lock spinlock is taken,
 * shouldn't be called otherwise!
 */
static void __ll_do_awake(struct ll_struct *ll)
{
	struct sk_buff *skb = NULL;

	while ((skb = skb_dequeue(&ll->tx_wait_q)))
		skb_queue_tail(&ll->txq, skb);

	ll->hcill_state = HCILL_AWAKE;
}

/*
 * Called upon a wake-up-indication from the device
 */
static void ll_device_want_to_wakeup(struct hci_uart *hu)
{
	unsigned long flags;
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p", hu);

	/* lock hcill state */
	spin_lock_irqsave(&ll->hcill_lock, flags);

	switch (ll->hcill_state) {
	case HCILL_ASLEEP_TO_AWAKE:
		/*
		 * This state means that both the host and the BRF chip
		 * have simultaneously sent a wake-up-indication packet.
		 * Traditionally, in this case, receiving a wake-up-indication
		 * was enough and an additional wake-up-ack wasn't needed.
		 * This has changed with the BRF6350, which does require an
		 * explicit wake-up-ack. Other BRF versions, which do not
		 * require an explicit ack here, do accept it, thus it is
		 * perfectly safe to always send one.
		 */
		BT_DBG("dual wake-up-indication");
		fallthrough;
	case HCILL_ASLEEP:
		/* acknowledge device wake up */
		if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) {
			BT_ERR("cannot acknowledge device wake up");
			goto out;
		}
		break;
	default:
		/* any other state is illegal */
		BT_ERR("received HCILL_WAKE_UP_IND in state %ld",
		       ll->hcill_state);
		break;
	}

	/* send pending packets and change state to HCILL_AWAKE */
	__ll_do_awake(ll);

out:
	spin_unlock_irqrestore(&ll->hcill_lock, flags);

	/* actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/*
 * Called upon a sleep-indication from the device
 */
static void ll_device_want_to_sleep(struct hci_uart *hu)
{
	unsigned long flags;
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p", hu);

	/* lock hcill state */
	spin_lock_irqsave(&ll->hcill_lock, flags);

	/* sanity check */
	if (ll->hcill_state != HCILL_AWAKE)
		BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld",
		       ll->hcill_state);

	/* acknowledge device sleep */
	if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) {
		BT_ERR("cannot acknowledge device sleep");
		goto out;
	}

	/* update state */
	ll->hcill_state = HCILL_ASLEEP;

out:
	spin_unlock_irqrestore(&ll->hcill_lock, flags);

	/* actually send the sleep ack packet */
	hci_uart_tx_wakeup(hu);
}

/*
 * Called upon wake-up-acknowledgement from the device
 */
static void ll_device_woke_up(struct hci_uart *hu)
{
	unsigned long flags;
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p", hu);

	/* lock hcill state */
	spin_lock_irqsave(&ll->hcill_lock, flags);

	/* sanity check */
	if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE)
		BT_ERR("received HCILL_WAKE_UP_ACK in state %ld",
		       ll->hcill_state);

	/* send pending packets and change state to HCILL_AWAKE */
	__ll_do_awake(ll);

	spin_unlock_irqrestore(&ll->hcill_lock, flags);

	/* actually send the packets */
	hci_uart_tx_wakeup(hu);
}

/* Enqueue frame for transmission (padding, crc, etc) */
/* may be called from two simultaneous tasklets */
static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	unsigned long flags = 0;
	struct ll_struct *ll = hu->priv;

	BT_DBG("hu %p skb %p", hu, skb);

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* lock hcill state */
	spin_lock_irqsave(&ll->hcill_lock, flags);

	/* act according to current state */
	switch (ll->hcill_state) {
	case HCILL_AWAKE:
		BT_DBG("device awake, sending normally");
		skb_queue_tail(&ll->txq, skb);
		break;
	case HCILL_ASLEEP:
		BT_DBG("device asleep, waking up and queueing packet");
		/* save packet for later */
		skb_queue_tail(&ll->tx_wait_q, skb);
		/* wake up the device */
		if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) {
			BT_ERR("cannot wake up device");
			break;
		}
		ll->hcill_state = HCILL_ASLEEP_TO_AWAKE;
		break;
	case HCILL_ASLEEP_TO_AWAKE:
		BT_DBG("device waking up, queueing packet");
		/* transient state; just keep packet for later */
		skb_queue_tail(&ll->tx_wait_q, skb);
		break;
	default:
		BT_ERR("illegal hcill state: %ld (losing packet)",
		       ll->hcill_state);
		kfree_skb(skb);
		break;
	}

	spin_unlock_irqrestore(&ll->hcill_lock, flags);

	return 0;
}

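/*
 * Receive-side handler for the four HCILL signalling bytes; called by
 * h4_recv_buf() through the ll_recv_pkts table below. Regular data packets
 * (ACL, SCO, event) are passed straight to hci_recv_frame() instead.
 */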
static int ll_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	struct ll_struct *ll = hu->priv;

	switch (hci_skb_pkt_type(skb)) {
	case HCILL_GO_TO_SLEEP_IND:
		BT_DBG("HCILL_GO_TO_SLEEP_IND packet");
		ll_device_want_to_sleep(hu);
		break;
	case HCILL_GO_TO_SLEEP_ACK:
		/* shouldn't happen */
		bt_dev_err(hdev, "received HCILL_GO_TO_SLEEP_ACK in state %ld",
			   ll->hcill_state);
		break;
	case HCILL_WAKE_UP_IND:
		BT_DBG("HCILL_WAKE_UP_IND packet");
		ll_device_want_to_wakeup(hu);
		break;
	case HCILL_WAKE_UP_ACK:
		BT_DBG("HCILL_WAKE_UP_ACK packet");
		ll_device_woke_up(hu);
		break;
	}

	kfree_skb(skb);
	return 0;
}

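/*
 * H4 receive descriptors for the HCILL signalling packets: each packet is a
 * single type byte (0x30..0x33) with no header and no payload, so every
 * length field is zero and the packet is complete as soon as the type byte
 * has been matched.
 */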
#define LL_RECV_SLEEP_IND \
	.type = HCILL_GO_TO_SLEEP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = 0

#define LL_RECV_SLEEP_ACK \
	.type = HCILL_GO_TO_SLEEP_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = 0

#define LL_RECV_WAKE_IND \
	.type = HCILL_WAKE_UP_IND, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = 0

#define LL_RECV_WAKE_ACK \
	.type = HCILL_WAKE_UP_ACK, \
	.hlen = 0, \
	.loff = 0, \
	.lsize = 0, \
	.maxlen = 0

static const struct h4_recv_pkt ll_recv_pkts[] = {
	{ H4_RECV_ACL,       .recv = hci_recv_frame },
	{ H4_RECV_SCO,       .recv = hci_recv_frame },
	{ H4_RECV_EVENT,     .recv = hci_recv_frame },
	{ LL_RECV_SLEEP_IND, .recv = ll_recv_frame  },
	{ LL_RECV_SLEEP_ACK, .recv = ll_recv_frame  },
	{ LL_RECV_WAKE_IND,  .recv = ll_recv_frame  },
	{ LL_RECV_WAKE_ACK,  .recv = ll_recv_frame  },
};

/* Recv data */
static int ll_recv(struct hci_uart *hu, const void *data, int count)
{
	struct ll_struct *ll = hu->priv;

	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
		return -EUNATCH;

	ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
				 ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
	if (IS_ERR(ll->rx_skb)) {
		int err = PTR_ERR(ll->rx_skb);
		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
		ll->rx_skb = NULL;
		return err;
	}

	return count;
}

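/* Hand the next queued packet to the hci_uart core for transmission */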
static struct sk_buff *ll_dequeue(struct hci_uart *hu)
{
	struct ll_struct *ll = hu->priv;

	return skb_dequeue(&ll->txq);
}

#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
static int read_local_version(struct hci_dev *hdev)
{
	int err = 0;
	unsigned short version = 0;
	struct sk_buff *skb;
	struct hci_rp_read_local_version *ver;

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Reading TI version information failed (%ld)",
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}
	if (skb->len != sizeof(*ver)) {
		err = -EILSEQ;
		goto out;
	}

	ver = (struct hci_rp_read_local_version *)skb->data;
	if (le16_to_cpu(ver->manufacturer) != 13) {
		err = -ENODEV;
		goto out;
	}

	version = le16_to_cpu(ver->lmp_subver);

out:
	if (err)
		bt_dev_err(hdev, "Failed to read TI version info: %d", err);
	kfree_skb(skb);
	return err ? err : version;
}

static int send_command_from_firmware(struct ll_device *lldev,
				      struct hci_command *cmd)
{
	struct sk_buff *skb;

	if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) {
		/* ignore remote change baud rate HCI VS command */
		bt_dev_warn(lldev->hu.hdev,
			    "change remote baud rate command in firmware");
		return 0;
	}
	if (cmd->prefix != 1)
		bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix);

	skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen,
			     &cmd->speed, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		bt_dev_err(lldev->hu.hdev, "send command failed");
		return PTR_ERR(skb);
	}
	kfree_skb(skb);
	return 0;
}

/**
 * download_firmware -
 *	internal function which parses the .bts firmware script file;
 *	only SEND and DELAY actions are interpreted as of now
 */
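/*
 * Layout of the .bts script (see <linux/ti_wilink_st.h>): a bts_header
 * (magic number and version) followed by a stream of bts_action records,
 * each carrying a type, a payload size and the payload itself (an HCI
 * command for ACTION_SEND_COMMAND, a delay in milliseconds for ACTION_DELAY).
 */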
static int download_firmware(struct ll_device *lldev)
{
	unsigned short chip, min_ver, maj_ver;
	int version, err, len;
	unsigned char *ptr, *action_ptr;
	unsigned char bts_scr_name[40];	/* 40 char long bts scr name? */
	const struct firmware *fw;
	struct hci_command *cmd;

	version = read_local_version(lldev->hu.hdev);
	if (version < 0)
		return version;

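	/*
	 * The chip id and firmware revision are packed into lmp_subver:
	 * bits 14..10 = chip, bit 15 plus bits 9..7 = major, bits 6..0 = minor.
	 * They select the matching TIInit_<chip>.<maj>.<min>.bts script.
	 */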
	chip = (version & 0x7C00) >> 10;
	min_ver = (version & 0x007F);
	maj_ver = (version & 0x0380) >> 7;
	if (version & 0x8000)
		maj_ver |= 0x0008;

	snprintf(bts_scr_name, sizeof(bts_scr_name),
		 "ti-connectivity/TIInit_%d.%d.%d.bts",
		 chip, maj_ver, min_ver);

	err = request_firmware(&fw, bts_scr_name, &lldev->serdev->dev);
	if (err || !fw->data || !fw->size) {
		bt_dev_err(lldev->hu.hdev, "request_firmware failed(errno %d) for %s",
			   err, bts_scr_name);
		return -EINVAL;
	}
	ptr = (void *)fw->data;
	len = fw->size;
	/* skip the bts_header (magic number and version) */
	ptr += sizeof(struct bts_header);
	len -= sizeof(struct bts_header);

	while (len > 0 && ptr) {
		bt_dev_dbg(lldev->hu.hdev, " action size %d, type %d ",
			   ((struct bts_action *)ptr)->size,
			   ((struct bts_action *)ptr)->type);

		action_ptr = &(((struct bts_action *)ptr)->data[0]);

		switch (((struct bts_action *)ptr)->type) {
		case ACTION_SEND_COMMAND:	/* action send */
			bt_dev_dbg(lldev->hu.hdev, "S");
			cmd = (struct hci_command *)action_ptr;
			err = send_command_from_firmware(lldev, cmd);
			if (err)
				goto out_rel_fw;
			break;
		case ACTION_WAIT_EVENT:  /* wait */
			/* no need to wait as command was synchronous */
			bt_dev_dbg(lldev->hu.hdev, "W");
			break;
		case ACTION_DELAY:	/* sleep */
			bt_dev_info(lldev->hu.hdev, "sleep command in scr");
			msleep(((struct bts_action_delay *)action_ptr)->msec);
			break;
		}
		len -= (sizeof(struct bts_action) +
			((struct bts_action *)ptr)->size);
		ptr += sizeof(struct bts_action) +
			((struct bts_action *)ptr)->size;
	}

out_rel_fw:
	/* fw download complete */
	release_firmware(fw);
	return err;
}

static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
	bdaddr_t bdaddr_swapped;
	struct sk_buff *skb;

	/* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD
	 * address to be MSB first, but bdaddr_t has the convention of being
	 * LSB first.
	 */
	baswap(&bdaddr_swapped, bdaddr);
	skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t),
			     &bdaddr_swapped, HCI_INIT_TIMEOUT);
	if (!IS_ERR(skb))
		kfree_skb(skb);

	return PTR_ERR_OR_ZERO(skb);
}

static int ll_setup(struct hci_uart *hu)
{
	int err, retry = 3;
	struct ll_device *lldev;
	struct serdev_device *serdev = hu->serdev;
	u32 speed;

	if (!serdev)
		return 0;

	lldev = serdev_device_get_drvdata(serdev);

	hu->hdev->set_bdaddr = ll_set_bdaddr;

	serdev_device_set_flow_control(serdev, true);

	do {
		/* Reset the Bluetooth device */
		gpiod_set_value_cansleep(lldev->enable_gpio, 0);
		msleep(5);
		gpiod_set_value_cansleep(lldev->enable_gpio, 1);
		err = serdev_device_wait_for_cts(serdev, true, 200);
		if (err) {
			bt_dev_err(hu->hdev, "Failed to get CTS");
			return err;
		}

		err = download_firmware(lldev);
		if (!err)
			break;

		/* Toggle BT_EN and retry */
		bt_dev_err(hu->hdev, "download firmware failed, retrying...");
	} while (retry--);

	if (err)
		return err;

	/* Set BD address if one was specified at probe */
	if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) {
		/* This means that there was an error getting the BD address
		 * during probe, so mark the device as having a bad address.
		 */
		set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
	} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
		err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
		if (err)
			set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
	}

	/* Operational speed if any */
	if (hu->oper_speed)
		speed = hu->oper_speed;
	else if (hu->proto->oper_speed)
		speed = hu->proto->oper_speed;
	else
		speed = 0;

	if (speed) {
		__le32 speed_le = cpu_to_le32(speed);
		struct sk_buff *skb;

		skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE,
				     sizeof(speed_le), &speed_le,
				     HCI_INIT_TIMEOUT);
		if (!IS_ERR(skb)) {
			kfree_skb(skb);
			serdev_device_set_baudrate(serdev, speed);
		}
	}

	return 0;
}

static const struct hci_uart_proto llp;

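/*
 * serdev probe: set up the enable GPIO, the optional external clock, the
 * UART speeds from the "max-speed" DT property and an optional BD address
 * from the "bd-address" nvmem cell, then register the device with the
 * hci_uart core.
 */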
static int hci_ti_probe(struct serdev_device *serdev)
{
	struct hci_uart *hu;
	struct ll_device *lldev;
	struct nvmem_cell *bdaddr_cell;
	u32 max_speed = 3000000;

	lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL);
	if (!lldev)
		return -ENOMEM;
	hu = &lldev->hu;

	serdev_device_set_drvdata(serdev, lldev);
	lldev->serdev = hu->serdev = serdev;

	lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev,
						     "enable",
						     GPIOD_OUT_LOW);
	if (IS_ERR(lldev->enable_gpio))
		return PTR_ERR(lldev->enable_gpio);

	lldev->ext_clk = devm_clk_get(&serdev->dev, "ext_clock");
	if (IS_ERR(lldev->ext_clk) && PTR_ERR(lldev->ext_clk) != -ENOENT)
		return PTR_ERR(lldev->ext_clk);

	of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed);
	hci_uart_set_speeds(hu, 115200, max_speed);

	/* optional BD address from nvram */
	bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address");
	if (IS_ERR(bdaddr_cell)) {
		int err = PTR_ERR(bdaddr_cell);

		if (err == -EPROBE_DEFER)
			return err;

		/* ENOENT means there is no matching nvmem cell and ENOSYS
		 * means that nvmem is not enabled in the kernel configuration.
		 */
		if (err != -ENOENT && err != -ENOSYS) {
			/* If there was some other error, give userspace a
			 * chance to fix the problem instead of failing to load
			 * the driver. Using BDADDR_NONE as a flag that is
			 * tested later in the setup function.
			 */
			dev_warn(&serdev->dev,
				 "Failed to get \"bd-address\" nvmem cell (%d)\n",
				 err);
			bacpy(&lldev->bdaddr, BDADDR_NONE);
		}
	} else {
		bdaddr_t *bdaddr;
		size_t len;

		bdaddr = nvmem_cell_read(bdaddr_cell, &len);
		nvmem_cell_put(bdaddr_cell);
		if (IS_ERR(bdaddr)) {
			dev_err(&serdev->dev, "Failed to read nvmem bd-address\n");
			return PTR_ERR(bdaddr);
		}
		if (len != sizeof(bdaddr_t)) {
			dev_err(&serdev->dev, "Invalid nvmem bd-address length\n");
			kfree(bdaddr);
			return -EINVAL;
		}

		/* As per the device tree bindings, the value from nvmem is
		 * expected to be MSB first, but in the kernel it is expected
		 * that bdaddr_t is LSB first.
		 */
		baswap(&lldev->bdaddr, bdaddr);
		kfree(bdaddr);
	}

	return hci_uart_register_device(hu, &llp);
}

static void hci_ti_remove(struct serdev_device *serdev)
{
	struct ll_device *lldev = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&lldev->hu);
}

static const struct of_device_id hci_ti_of_match[] = {
	{ .compatible = "ti,cc2560" },
	{ .compatible = "ti,wl1271-st" },
	{ .compatible = "ti,wl1273-st" },
	{ .compatible = "ti,wl1281-st" },
	{ .compatible = "ti,wl1283-st" },
	{ .compatible = "ti,wl1285-st" },
	{ .compatible = "ti,wl1801-st" },
	{ .compatible = "ti,wl1805-st" },
	{ .compatible = "ti,wl1807-st" },
	{ .compatible = "ti,wl1831-st" },
	{ .compatible = "ti,wl1835-st" },
	{ .compatible = "ti,wl1837-st" },
	{},
};
MODULE_DEVICE_TABLE(of, hci_ti_of_match);

static struct serdev_device_driver hci_ti_drv = {
	.driver		= {
		.name	= "hci-ti",
		.of_match_table = of_match_ptr(hci_ti_of_match),
	},
	.probe	= hci_ti_probe,
	.remove	= hci_ti_remove,
};
#else
#define ll_setup NULL
#endif

static const struct hci_uart_proto llp = {
	.id		= HCI_UART_LL,
	.name		= "LL",
	.setup		= ll_setup,
	.open		= ll_open,
	.close		= ll_close,
	.recv		= ll_recv,
	.enqueue	= ll_enqueue,
	.dequeue	= ll_dequeue,
	.flush		= ll_flush,
};

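/* Entry points called from hci_ldisc.c when the hci_uart module loads/unloads */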
int __init ll_init(void)
{
	serdev_device_driver_register(&hci_ti_drv);

	return hci_uart_register_proto(&llp);
}

int __exit ll_deinit(void)
{
	serdev_device_driver_unregister(&hci_ti_drv);

	return hci_uart_unregister_proto(&llp);
}