Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards. The listing below is drivers/bluetooth/btmtkuart.c, the MediaTek serial Bluetooth driver.

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.2"

#define FIRMWARE_MT7622		"mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"

#define MTK_STP_TLR_SIZE	2

#define BTMTKUART_TX_STATE_ACTIVE	1
#define BTMTKUART_TX_STATE_WAKEUP	2
#define BTMTKUART_TX_WAIT_VND_EVT	3
#define BTMTKUART_REQUIRED_WAKEUP	4

#define BTMTKUART_FLAG_STANDALONE_HW	 BIT(0)

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_TEST = 0x2,
	MTK_WMT_WAKEUP = 0x3,
	MTK_WMT_HIF = 0x4,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7,
	MTK_WMT_SEMAPHORE = 0x17,
};

enum {
	BTMTK_WMT_INVALID,
	BTMTK_WMT_PATCH_UNDONE,
	BTMTK_WMT_PATCH_DONE,
	BTMTK_WMT_ON_UNDONE,
	BTMTK_WMT_ON_DONE,
	BTMTK_WMT_ON_PROGRESS,
};

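/*
 * STP framing, as inferred from mtk_stp_split() below: each burst of UART
 * data from the controller is wrapped as
 *
 *   | prefix 0x80 | dlen (big endian, low 12 bits used) | cs | payload | 2-byte trailer |
 *
 * The parser keeps up to six bytes in stp_pad: the two trailer bytes of the
 * previous frame followed by the four header bytes of the next one, which is
 * why stp_cursor starts at 2 when no trailer has been received yet.
 */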
struct mtk_stp_hdr {
	u8	prefix;
	__be16	dlen;
	u8	cs;
} __packed;

struct btmtkuart_data {
	unsigned int flags;
	const char *fwname;
};

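/*
 * WMT command/event framing, as used by mtk_hci_wmt_sync() and
 * btmtkuart_recv_event() below: the header plus payload is carried to the
 * controller in the vendor HCI command 0xfc6f, and the controller answers
 * with a vendor event (0xe4, remapped to 0xff) that echoes the same header,
 * optionally followed by a status word (struct btmtk_hci_wmt_evt_funcc).
 */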
struct mtk_wmt_hdr {
	u8	dir;
	u8	op;
	__le16	dlen;
	u8	flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
	struct hci_event_hdr hhdr;
	struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
	struct btmtk_hci_wmt_evt hwhdr;
	__be16 status;
} __packed;

struct btmtk_tci_sleep {
	u8 mode;
	__le16 duration;
	__le16 host_duration;
	u8 host_wakeup_pin;
	u8 time_compensation;
} __packed;

struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

struct btmtkuart_dev {
	struct hci_dev *hdev;
	struct serdev_device *serdev;

	struct clk *clk;
	struct clk *osc;
	struct regulator *vcc;
	struct gpio_desc *reset;
	struct gpio_desc *boot;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_runtime;
	struct pinctrl_state *pins_boot;
	speed_t	desired_speed;
	speed_t	curr_speed;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *rx_skb;
	struct sk_buff *evt_skb;

	u8	stp_pad[6];
	u8	stp_cursor;
	u16	stp_dlen;

	const struct btmtkuart_data *data;
};

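/*
 * A "standalone" controller is a discrete MediaTek part attached over UART
 * and needs the explicit baudrate handshake done in btmtkuart_open() and
 * btmtkuart_setup(); a Bluetooth block built into the host SoC skips that
 * step. The per-chip btmtkuart_data.flags select which path is taken.
 */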
#define btmtkuart_is_standalone(bdev)	\
	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
#define btmtkuart_is_builtin_soc(bdev)	\
	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)

static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255) {
		err = -EINVAL;
		goto err_free_skb;
	}

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_skb;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_skb;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		err = -ETIMEDOUT;
		goto err_free_skb;
	}

	/* Parse and handle the returned WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case MTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case MTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The patch header is 30 bytes and should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

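	/* The patch body is streamed to the controller in chunks of at most
	 * 250 bytes; flag tells the controller where we are in the sequence
	 * (1 = first fragment, 2 = continuation, 3 = last fragment).
	 */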
	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell device the position in sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			goto free_fw;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function the firmware provides */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		goto free_fw;
	}

	/* Wait a few moments for firmware activation to complete */
	usleep_range(10000, 12000);

free_fw:
	release_firmware(fw);
	return err;
}

static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* When someone is waiting for the WMT event, the skb is cloned here
	 * and the event is processed from that clone.
	 */
	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtkuart_recv_event },
};

static void btmtkuart_tx_work(struct work_struct *work)
{
	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
						   tx_work);
	struct serdev_device *serdev = bdev->serdev;
	struct hci_dev *hdev = bdev->hdev;

	while (1) {
		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

		while (1) {
			struct sk_buff *skb = skb_dequeue(&bdev->txq);
			int len;

			if (!skb)
				break;

			len = serdev_device_write_buf(serdev, skb->data,
						      skb->len);
			hdev->stat.byte_tx += len;

			skb_pull(skb, len);
			if (skb->len > 0) {
				skb_queue_head(&bdev->txq, skb);
				break;
			}

			switch (hci_skb_pkt_type(skb)) {
			case HCI_COMMAND_PKT:
				hdev->stat.cmd_tx++;
				break;
			case HCI_ACLDATA_PKT:
				hdev->stat.acl_tx++;
				break;
			case HCI_SCODATA_PKT:
				hdev->stat.sco_tx++;
				break;
			}

			kfree_skb(skb);
		}

		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
			break;
	}

	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

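/*
 * If a TX worker is already active, only set the WAKEUP bit so the running
 * worker makes one more pass over the queue; scheduling the work again is
 * harmless when it is already pending.
 */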
static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

	schedule_work(&bdev->tx_work);
}

static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
	      int *sz_h4)
{
	struct mtk_stp_hdr *shdr;

	/* The cursor is reset once all of the STP data has been consumed */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
		bdev->stp_cursor = 0;

	/* Fill the pad until the full STP header has been obtained */
	while (bdev->stp_cursor < 6 && count > 0) {
		bdev->stp_pad[bdev->stp_cursor] = *data;
		bdev->stp_cursor++;
		data++;
		count--;
	}

	/* Retrieve STP info and have a sanity check */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

		/* Resync STP when unexpected data is being read */
		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
				   shdr->prefix, bdev->stp_dlen);
			bdev->stp_cursor = 2;
			bdev->stp_dlen = 0;
		}
	}

	/* Quit directly when there is no data left for H4 to process */
	if (count <= 0)
		return NULL;

	/* Translate to the amount of data H4 can handle so far */
	*sz_h4 = min_t(int, count, bdev->stp_dlen);

	/* Update the remaining size of STP packet */
	bdev->stp_dlen -= *sz_h4;

	/* Data points to STP payload which can be handled by H4 */
	return data;
}

static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	const unsigned char *p_left = data, *p_h4;
	int sz_left = count, sz_h4, adv;
	int err;

	while (sz_left > 0) {
		/* The serial data received from the MT7622 BT controller is
		 * always wrapped with an STP header and trailer.
		 *
		 * A full STP packet looks like
		 *   ------------------------------------
		 *  | STP header  |  H:4   | STP trailer |
		 *   ------------------------------------
		 * but it is not guaranteed to contain a complete H:4 packet:
		 * an H:4 packet may be fragmented across several STP packets,
		 * so an STP header and its length do not delimit an H:4
		 * frame. The length recorded in the STP header only tells how
		 * much data the H:4 engine can consume at this point.
		 */
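		/* For example, a large ACL frame may arrive split across
		 * several STP packets, while a single receive_buf() call may
		 * also carry several complete STP packets back to back; the
		 * loop below copes with both cases.
		 */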

		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
		if (!p_h4)
			break;

		adv = p_h4 - p_left;
		sz_left -= adv;
		p_left += adv;

		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
					   sz_h4, mtk_recv_pkts,
					   ARRAY_SIZE(mtk_recv_pkts));
		if (IS_ERR(bdev->rx_skb)) {
			err = PTR_ERR(bdev->rx_skb);
			bt_dev_err(bdev->hdev,
				   "Frame reassembly failed (%d)", err);
			bdev->rx_skb = NULL;
			return err;
		}

		sz_left -= sz_h4;
		p_left += sz_h4;
	}

	return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
				 size_t count)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	int err;

	err = btmtkuart_recv(bdev->hdev, data, count);
	if (err < 0)
		return err;

	bdev->hdev->stat.byte_rx += count;

	return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

	btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
	.receive_buf = btmtkuart_receive_buf,
	.write_wakeup = btmtkuart_write_wakeup,
};

static int btmtkuart_open(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev;
	int err;

	err = serdev_device_open(bdev->serdev);
	if (err) {
		bt_dev_err(hdev, "Unable to open UART device %s",
			   dev_name(&bdev->serdev->dev));
		goto err_open;
	}

	if (btmtkuart_is_standalone(bdev)) {
		if (bdev->curr_speed != bdev->desired_speed)
			err = serdev_device_set_baudrate(bdev->serdev,
							 115200);
		else
			err = serdev_device_set_baudrate(bdev->serdev,
							 bdev->desired_speed);

		if (err < 0) {
			bt_dev_err(hdev, "Unable to set baudrate UART device %s",
				   dev_name(&bdev->serdev->dev));
			goto err_serdev_close;
		}

		serdev_device_set_flow_control(bdev->serdev, false);
	}

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	dev = &bdev->serdev->dev;

	/* Enable the power domain and clock the device requires */
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		goto err_disable_rpm;
	}

	err = clk_prepare_enable(bdev->clk);
	if (err < 0)
		goto err_put_rpm;

	return 0;

err_put_rpm:
	pm_runtime_put_sync(dev);
err_disable_rpm:
	pm_runtime_disable(dev);
err_serdev_close:
	serdev_device_close(bdev->serdev);
err_open:
	return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev = &bdev->serdev->dev;

	/* Shutdown the clock and power domain the device requires */
	clk_disable_unprepare(bdev->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	serdev_device_close(bdev->serdev);

	return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

	/* Flush any pending characters */
	serdev_device_write_flush(bdev->serdev);
	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	kfree_skb(bdev->rx_skb);
	bdev->rx_skb = NULL;

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	return 0;
}

static int btmtkuart_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

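/*
 * Baudrate switch handshake, as implemented below: announce the new rate to
 * the controller over MTK_WMT_HIF, reconfigure the host UART, wake the link
 * up at the new rate with a dummy 0xff byte, then confirm it with
 * MTK_WMT_TEST.
 */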
static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	__le32 baudrate;
	u8 param;
	int err;

	/* Indicate to the device that the host is ready to change to a new
	 * baudrate.
	 */
	baudrate = cpu_to_le32(bdev->desired_speed);
	wmt_params.op = MTK_WMT_HIF;
	wmt_params.flag = 1;
	wmt_params.dlen = 4;
	wmt_params.data = &baudrate;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to device baudrate (%d)", err);
		return err;
	}

	err = serdev_device_set_baudrate(bdev->serdev,
					 bdev->desired_speed);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
			   err);
		return err;
	}

	serdev_device_set_flow_control(bdev->serdev, false);

	/* Send a dummy byte 0xff to activate the new baudrate */
	param = 0xff;
	err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param));
	if (err < 0 || err < sizeof(param))
		return err;

	serdev_device_wait_until_sent(bdev->serdev, 0);

	/* Wait some time for the device to finish changing the baudrate */
	usleep_range(20000, 22000);

	/* Test the new baudrate */
	wmt_params.op = MTK_WMT_TEST;
	wmt_params.flag = 7;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
			   err);
		return err;
	}

	bdev->curr_speed = bdev->desired_speed;

	return 0;
}

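/*
 * Device bring-up, roughly in the order implemented below: optionally wake
 * MCUSYS, switch the baudrate on standalone parts, check the patch
 * semaphore, download the firmware when it is not present yet, wait for the
 * function query to leave the "in progress" state, enable the Bluetooth
 * function and finally apply the low power (TCI sleep) settings.
 */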
static int btmtkuart_setup(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Waking up MCUSYS is required for certain devices before we start
	 * any setup.
	 */
	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
		wmt_params.op = MTK_WMT_WAKEUP;
		wmt_params.flag = 3;
		wmt_params.dlen = 0;
		wmt_params.data = NULL;
		wmt_params.status = NULL;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
			return err;
		}

		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
	}

	if (btmtkuart_is_standalone(bdev))
		btmtkuart_change_baudrate(hdev);

	/* Query whether the firmware has already been downloaded */
	wmt_params.op = MTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Set up the firmware that the device requires */
	err = mtk_setup_firmware(hdev, bdev->data->fwname);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtkuart_func_query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	if (status == BTMTK_WMT_ON_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		bt_dev_info(hdev, "function already on");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		goto ignore_func_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/* Enable Bluetooth protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	wmt_params.op = MTK_WMT_FUNC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	wmt_params.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	wmt_params.dlen = sizeof(param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	wmt_params.data = &param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	wmt_params.status = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	err = mtk_hci_wmt_sync(hdev, &wmt_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) ignore_func_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/* Apply the low power environment setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	tci_sleep.mode = 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	tci_sleep.duration = cpu_to_le16(0x640);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	tci_sleep.host_duration = cpu_to_le16(0x640);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	tci_sleep.host_wakeup_pin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	tci_sleep.time_compensation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			     HCI_INIT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		err = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
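	/* ktime_to_ns() >> 10 divides by 1024, a cheap approximation of the
	 * nanoseconds-to-microseconds conversion used for the log below.
	 */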
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	rettime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	delta = ktime_sub(rettime, calltime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	bt_dev_info(hdev, "Device setup in %llu usecs", duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) static int btmtkuart_shutdown(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	struct btmtk_hci_wmt_params wmt_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	u8 param = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	/* Disable the device */
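	/* MTK_WMT_FUNC_CTRL with a zero parameter turns the Bluetooth
	 * function off again.
	 */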
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	wmt_params.op = MTK_WMT_FUNC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	wmt_params.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	wmt_params.dlen = sizeof(param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	wmt_params.data = &param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	wmt_params.status = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	err = mtk_hci_wmt_sync(hdev, &wmt_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	struct mtk_stp_hdr *shdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	int err, dlen, type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	/* Prepend skb with frame type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/* Make sure that there is enough room for the STP header and trailer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				       GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	/* Add the STP header */
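	/* The big-endian dlen field packs the payload length into its low
	 * 12 bits and the packet type into the top 4 bits (type is 0 here).
	 */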
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	dlen = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	shdr = skb_push(skb, sizeof(*shdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	shdr->prefix = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	shdr->cs = 0;		/* MT7622 doesn't care about checksum value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/* Add the STP trailer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	skb_put_zero(skb, MTK_STP_TLR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	skb_queue_tail(&bdev->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
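	/* The actual UART write is deferred to the tx work scheduled by
	 * btmtkuart_tx_wakeup().
	 */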
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	btmtkuart_tx_wakeup(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) static int btmtkuart_parse_dt(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct device_node *node = serdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	u32 speed = 921600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (btmtkuart_is_standalone(bdev)) {
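		/* Default to 921600 baud unless the DT node overrides it
		 * with a "current-speed" property.
		 */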
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		of_property_read_u32(node, "current-speed", &speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		bdev->desired_speed = speed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (IS_ERR(bdev->vcc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			err = PTR_ERR(bdev->vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		if (IS_ERR(bdev->osc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			err = PTR_ERR(bdev->osc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 						     GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		if (IS_ERR(bdev->boot)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 			err = PTR_ERR(bdev->boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (IS_ERR(bdev->pinctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			err = PTR_ERR(bdev->pinctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
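		/* Either the optional boot GPIO above or a "default" pinctrl
		 * state must be able to hold RXD low during the boot stage;
		 * only fail when neither is available.
		 */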
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 						       "default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 			err = PTR_ERR(bdev->pins_boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			dev_err(&serdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				"Should assign RXD to LOW at boot stage\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 							  "runtime");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		if (IS_ERR(bdev->pins_runtime)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			err = PTR_ERR(bdev->pins_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 						      GPIOD_OUT_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		if (IS_ERR(bdev->reset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			err = PTR_ERR(bdev->reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	} else if (btmtkuart_is_builtin_soc(bdev)) {
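		/* For Bluetooth built into the SoC (MT7622), only the "ref"
		 * clock needs to be looked up here.
		 */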
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		bdev->clk = devm_clk_get(&serdev->dev, "ref");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		if (IS_ERR(bdev->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			return PTR_ERR(bdev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static int btmtkuart_probe(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	struct btmtkuart_dev *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	bdev->data = of_device_get_match_data(&serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if (!bdev->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	bdev->serdev = serdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	serdev_device_set_drvdata(serdev, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	err = btmtkuart_parse_dt(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	skb_queue_head_init(&bdev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/* Initialize and register HCI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	hdev = hci_alloc_dev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		dev_err(&serdev->dev, "Can't allocate HCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	bdev->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	hdev->bus = HCI_UART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	hci_set_drvdata(hdev, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	hdev->open     = btmtkuart_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	hdev->close    = btmtkuart_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	hdev->flush    = btmtkuart_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	hdev->setup    = btmtkuart_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	hdev->shutdown = btmtkuart_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	hdev->send     = btmtkuart_send_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	SET_HCIDEV_DEV(hdev, &serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
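	/* 70 is the Bluetooth SIG company identifier assigned to MediaTek.
	 * The quirk below asks the core to repeat the setup stage on every
	 * power-on rather than only once.
	 */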
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	hdev->manufacturer = 70;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	if (btmtkuart_is_standalone(bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		err = clk_prepare_enable(bdev->osc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			goto err_hci_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		if (bdev->boot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			gpiod_set_value_cansleep(bdev->boot, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			/* Switch to the specific pin state that the boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			 * stage requires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		/* Power on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		err = regulator_enable(bdev->vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			goto err_clk_disable_unprepare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		/* Reset the device if a reset GPIO is available; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		 * a clean reset must be guaranteed by the board-level design.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		if (bdev->reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			gpiod_set_value_cansleep(bdev->reset, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			usleep_range(1000, 2000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			gpiod_set_value_cansleep(bdev->reset, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		/* Wait until the device is ready, then switch to the pin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		 * mode the device requires for UART transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		msleep(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		if (bdev->boot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			devm_gpiod_put(&serdev->dev, bdev->boot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		/* A standalone device doesn't depend on an SoC power domain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		 * so mark it as having no runtime PM callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		pm_runtime_no_callbacks(&serdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
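		/* Note for btmtkuart_setup(): the chip still has to be woken
		 * up over the UART at the start of setup.
		 */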
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	err = hci_register_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		dev_err(&serdev->dev, "Can't register HCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		goto err_regulator_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) err_regulator_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (btmtkuart_is_standalone(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		regulator_disable(bdev->vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) err_clk_disable_unprepare:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (btmtkuart_is_standalone(bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		clk_disable_unprepare(bdev->osc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) err_hci_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	hci_free_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static void btmtkuart_remove(struct serdev_device *serdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	struct hci_dev *hdev = bdev->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (btmtkuart_is_standalone(bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		regulator_disable(bdev->vcc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		clk_disable_unprepare(bdev->osc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	hci_unregister_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	hci_free_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
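/* Per-chip match data: MT7622 is the SoC-integrated variant, while MT7663
 * and MT7668 are standalone parts that need the extra power, clock and
 * pinctrl handling selected by BTMTKUART_FLAG_STANDALONE_HW.
 */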
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static const struct btmtkuart_data mt7622_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	.fwname = FIRMWARE_MT7622,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static const struct btmtkuart_data mt7663_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	.flags = BTMTKUART_FLAG_STANDALONE_HW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	.fwname = FIRMWARE_MT7663,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static const struct btmtkuart_data mt7668_data = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	.flags = BTMTKUART_FLAG_STANDALONE_HW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	.fwname = FIRMWARE_MT7668,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) #ifdef CONFIG_OF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static const struct of_device_id mtk_of_match_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) MODULE_DEVICE_TABLE(of, mtk_of_match_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static struct serdev_device_driver btmtkuart_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	.probe = btmtkuart_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	.remove = btmtkuart_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		.name = "btmtkuart",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		.of_match_table = of_match_ptr(mtk_of_match_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) module_serdev_device_driver(btmtkuart_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) MODULE_VERSION(VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) MODULE_FIRMWARE(FIRMWARE_MT7622);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) MODULE_FIRMWARE(FIRMWARE_MT7663);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) MODULE_FIRMWARE(FIRMWARE_MT7668);