// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 MediaTek Inc.

/*
 * Bluetooth support for MediaTek SDIO devices
 *
 * This file is written based on btsdio.c and btmtkuart.c.
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7663	"mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668	"mediatek/mt7668pr2h.bin"

#define MTKBTSDIO_AUTOSUSPEND_DELAY	8000

static bool enable_autosuspend;
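/* Per-chip data, currently just the name of the firmware file to load */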
struct btmtksdio_data {
	const char *fwname;
};

static const struct btmtksdio_data mt7663_data = {
	.fwname = FIRMWARE_MT7663,
};

static const struct btmtksdio_data mt7668_data = {
	.fwname = FIRMWARE_MT7668,
};

static const struct sdio_device_id btmtksdio_table[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663),
	 .driver_data = (kernel_ulong_t)&mt7663_data },
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
	 .driver_data = (kernel_ulong_t)&mt7668_data },
	{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, btmtksdio_table);

#define MTK_REG_CHLPCR		0x4	/* W1S */
#define C_INT_EN_SET		BIT(0)
#define C_INT_EN_CLR		BIT(1)
#define C_FW_OWN_REQ_SET	BIT(8)	/* For write */
#define C_COM_DRV_OWN		BIT(8)	/* For read */
#define C_FW_OWN_REQ_CLR	BIT(9)

#define MTK_REG_CSDIOCSR	0x8
#define SDIO_RE_INIT_EN		BIT(0)
#define SDIO_INT_CTL		BIT(2)

#define MTK_REG_CHCR		0xc
#define C_INT_CLR_CTRL		BIT(1)

/* CHISR has the same bit field definition as CHIER */
#define MTK_REG_CHISR		0x10
#define MTK_REG_CHIER		0x14
#define FW_OWN_BACK_INT		BIT(0)
#define RX_DONE_INT		BIT(1)
#define TX_EMPTY		BIT(2)
#define TX_FIFO_OVERFLOW	BIT(8)
#define RX_PKT_LEN		GENMASK(31, 16)

#define MTK_REG_CTDR		0x18

#define MTK_REG_CRDR		0x1c

#define MTK_SDIO_BLOCK_SIZE	256

#define BTMTKSDIO_TX_WAIT_VND_EVT	1

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_TEST = 0x2,
	MTK_WMT_WAKEUP = 0x3,
	MTK_WMT_HIF = 0x4,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7,
	MTK_WMT_SEMAPHORE = 0x17,
};

enum {
	BTMTK_WMT_INVALID,
	BTMTK_WMT_PATCH_UNDONE,
	BTMTK_WMT_PATCH_DONE,
	BTMTK_WMT_ON_UNDONE,
	BTMTK_WMT_ON_DONE,
	BTMTK_WMT_ON_PROGRESS,
};

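/* MediaTek SDIO packet header prepended to every Bluetooth packet; len is
 * the total packet length including this header and bt_type carries the
 * H:4 HCI packet type.
 */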
struct mtkbtsdio_hdr {
	__le16 len;
	__le16 reserved;
	u8 bt_type;
} __packed;

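/* Header of a WMT (vendor) command or event: dir is set to 1 for
 * host-to-device, op selects the WMT operation and dlen covers the flag
 * byte plus the payload.
 */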
struct mtk_wmt_hdr {
	u8 dir;
	u8 op;
	__le16 dlen;
	u8 flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
	struct hci_event_hdr hhdr;
	struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
	struct btmtk_hci_wmt_evt hwhdr;
	__be16 status;
} __packed;

struct btmtk_tci_sleep {
	u8 mode;
	__le16 duration;
	__le16 host_duration;
	u8 host_wakeup_pin;
	u8 time_compensation;
} __packed;

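/* Parameters for one synchronous WMT exchange; status, when non-NULL,
 * receives the decoded result of the returned WMT event.
 */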
struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

struct btmtksdio_dev {
	struct hci_dev *hdev;
	struct sdio_func *func;
	struct device *dev;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *evt_skb;

	const struct btmtksdio_data *data;
};

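/* Send a WMT command wrapped in the vendor HCI command 0xfc6f, wait for the
 * corresponding vendor event and decode its status for the caller.
 */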
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;
	}

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case MTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case MTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

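/* Prepend the MediaTek SDIO header to one packet and push it out through the
 * CTDR register, padding the transfer up to the SDIO block size.
 */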
static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
			       struct sk_buff *skb)
{
	struct mtkbtsdio_hdr *sdio_hdr;
	int err;

	/* Make sure that there is enough room for the SDIO header */
	if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
		err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Prepend MediaTek SDIO Specific Header */
	skb_push(skb, sizeof(*sdio_hdr));

	sdio_hdr = (void *)skb->data;
	sdio_hdr->len = cpu_to_le16(skb->len);
	sdio_hdr->reserved = cpu_to_le16(0);
	sdio_hdr->bt_type = hci_skb_pkt_type(skb);

	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
	if (err < 0)
		goto err_skb_pull;

	bdev->hdev->stat.byte_tx += skb->len;

	kfree_skb(skb);

	return 0;

err_skb_pull:
	skb_pull(skb, sizeof(*sdio_hdr));

	return err;
}

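/* Read back CHLPCR so the caller can check whether the driver (host) side
 * currently owns the device (C_COM_DRV_OWN set).
 */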
static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
{
	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}

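/* Drain the TX queue while holding the SDIO host and a runtime PM reference;
 * a packet that fails to send is put back at the head of the queue.
 */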
static void btmtksdio_tx_work(struct work_struct *work)
{
	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
						  tx_work);
	struct sk_buff *skb;
	int err;

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	while ((skb = skb_dequeue(&bdev->txq))) {
		err = btmtksdio_tx_packet(bdev, skb);
		if (err < 0) {
			bdev->hdev->stat.err_tx++;
			skb_queue_head(&bdev->txq, skb);
			break;
		}
	}

	sdio_release_host(bdev->func);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* If someone is waiting for the WMT event, clone the skb here so
	 * that the waiter can process the event from the clone.
	 */
	if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

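/* H4 packet descriptors used to validate received packets and to locate the
 * payload length field of each packet type.
 */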
static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL, .recv = hci_recv_frame },
	{ H4_RECV_SCO, .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = btmtksdio_recv_event },
};

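/* Read one packet of rx_size bytes from the CRDR register, strip the
 * MediaTek SDIO header and any tail padding, and hand the frame to the
 * matching receive handler.
 */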
static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
{
	const struct h4_recv_pkt *pkts = mtk_recv_pkts;
	int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
	struct mtkbtsdio_hdr *sdio_hdr;
	int err, i, pad_size;
	struct sk_buff *skb;
	u16 dlen;

	if (rx_size < sizeof(*sdio_hdr))
		return -EILSEQ;

	/* An SDIO packet contains exactly one Bluetooth packet */
	skb = bt_skb_alloc(rx_size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, rx_size);

	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
	if (err < 0)
		goto err_kfree_skb;

	sdio_hdr = (void *)skb->data;

	/* Assume -EILSEQ as the default error simply to keep the error path
	 * cleaner.
	 */
	err = -EILSEQ;

	if (rx_size != le16_to_cpu(sdio_hdr->len)) {
		bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched");
		goto err_kfree_skb;
	}

	hci_skb_pkt_type(skb) = sdio_hdr->bt_type;

	/* Remove MediaTek SDIO header */
	skb_pull(skb, sizeof(*sdio_hdr));

	/* We have to dig into the packet to get the payload size and then
	 * work out how many padding bytes are at the tail; these padding
	 * bytes must be removed before the packet is passed up to the core
	 * layer.
	 */
	for (i = 0; i < pkts_count; i++) {
		if (sdio_hdr->bt_type == (&pkts[i])->type)
			break;
	}

	if (i >= pkts_count) {
		bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
			   sdio_hdr->bt_type);
		goto err_kfree_skb;
	}

	/* Remaining bytes cannot hold a header */
	if (skb->len < (&pkts[i])->hlen) {
		bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
		goto err_kfree_skb;
	}

	switch ((&pkts[i])->lsize) {
	case 1:
		dlen = skb->data[(&pkts[i])->loff];
		break;
	case 2:
		dlen = get_unaligned_le16(skb->data +
					  (&pkts[i])->loff);
		break;
	default:
		goto err_kfree_skb;
	}

	pad_size = skb->len - (&pkts[i])->hlen - dlen;

	/* Remaining bytes cannot hold a payload */
	if (pad_size < 0) {
		bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
		goto err_kfree_skb;
	}

	/* Remove padding bytes */
	skb_trim(skb, skb->len - pad_size);

	/* Complete frame */
	(&pkts[i])->recv(bdev->hdev, skb);

	bdev->hdev->stat.byte_rx += rx_size;

	return 0;

err_kfree_skb:
	kfree_skb(skb);

	return err;
}

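/* SDIO IRQ handler: acknowledge the interrupt status, kick the TX work on
 * TX_EMPTY and read out a packet on RX_DONE_INT, all with the interrupt
 * sources temporarily disabled and a runtime PM reference held.
 */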
static void btmtksdio_interrupt(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	u32 int_status;
	u16 rx_size;

	/* The host is required to get ownership from the device before
	 * accessing any register. However, if the SDIO host is not released
	 * here, a deadlock can occur due to a circular wait between the SDIO
	 * IRQ work and the PM runtime work. So we explicitly release the
	 * SDIO host here and claim it again once the PM runtime work is done.
	 */
	sdio_release_host(bdev->func);

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	int_status = sdio_readl(func, MTK_REG_CHISR, NULL);

	/* Ack the interrupt as soon as possible, before any operation on the
	 * hardware.
	 *
	 * Note that we don't ack any status during operations to avoid a
	 * race between the host and the device: for example, it would be
	 * possible to mistakenly ack RX_DONE for the next packet, so that
	 * the interrupt is never raised again even though data is still
	 * pending in the hardware FIFO.
	 */
	sdio_writel(func, int_status, MTK_REG_CHISR, NULL);

	if (unlikely(!int_status))
		bt_dev_err(bdev->hdev, "CHISR is 0");

	if (int_status & FW_OWN_BACK_INT)
		bt_dev_dbg(bdev->hdev, "Get fw own back");

	if (int_status & TX_EMPTY)
		schedule_work(&bdev->tx_work);
	else if (unlikely(int_status & TX_FIFO_OVERFLOW))
		bt_dev_warn(bdev->hdev, "Tx fifo overflow");

	if (int_status & RX_DONE_INT) {
		rx_size = (int_status & RX_PKT_LEN) >> 16;

		if (btmtksdio_rx_packet(bdev, rx_size) < 0)
			bdev->hdev->stat.err_rx++;
	}

	/* Enable interrupt */
	sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

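/* Bring up the SDIO function: take ownership from the firmware, claim the
 * IRQ, set the block size and configure which interrupt sources are enabled.
 */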
static int btmtksdio_open(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	int err;
	u32 status;

	sdio_claim_host(bdev->func);

	err = sdio_enable_func(bdev->func);
	if (err < 0)
		goto err_release_host;

	/* Get ownership from the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
	if (err < 0) {
		bt_dev_err(bdev->hdev, "Cannot get ownership from device");
		goto err_disable_func;
	}

	/* Disable interrupt & mask out all interrupt sources */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_disable_func;

	err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
	if (err < 0)
		goto err_disable_func;

	err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
	if (err < 0)
		goto err_release_irq;

	/* SDIO CMD 5 allows the SDIO device to go back to the idle state,
	 * and synchronous interrupt is supported in SDIO 4-bit mode.
	 */
	sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
		    MTK_REG_CSDIOCSR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup write-1-clear for CHISR register */
	sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup interrupt sources */
	sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
		    MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_release_irq;

	/* Enable interrupt */
	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_release_irq;

	sdio_release_host(bdev->func);

	return 0;

err_release_irq:
	sdio_release_irq(bdev->func);

err_disable_func:
	sdio_disable_func(bdev->func);

err_release_host:
	sdio_release_host(bdev->func);

	return err;
}

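/* Shut the transport down: disable interrupts, release the IRQ, hand
 * ownership back to the firmware and disable the SDIO function.
 */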
static int btmtksdio_close(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u32 status;
	int err;

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	sdio_release_irq(bdev->func);

	/* Return ownership to the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
	if (err < 0)
		bt_dev_err(bdev->hdev, "Cannot return ownership to device");

	sdio_disable_func(bdev->func);

	sdio_release_host(bdev->func);

	return 0;
}

static int btmtksdio_flush(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	return 0;
}

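/* Query via a WMT FUNC_CTRL exchange whether the Bluetooth function is
 * currently enabled; returns the decoded WMT status or a negative error.
 */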
static int btmtksdio_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

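/* Download the patch firmware: power on the data RAM, stream the patch body
 * in chunks of at most 250 bytes via WMT PATCH_DWNLD (flag 1/2/3 marking the
 * first/continuation/last fragment), then issue WMT RST to activate it.
 */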
static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag, param;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	/* Power on data RAM the firmware relies on. */
	param = 1;
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 3;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
		goto free_fw;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The patch header is 30 bytes and should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell the device its position in the sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			goto free_fw;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function that the firmware provides */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		goto free_fw;
	}

	/* Wait a few moments for firmware activation to complete */
	usleep_range(10000, 12000);

free_fw:
	release_firmware(fw);
	return err;
}

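/* One-time device setup called by the HCI core: download the firmware if it
 * is not already present, enable the Bluetooth function, apply the low power
 * settings and configure runtime PM.
 */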
static int btmtksdio_setup(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Query whether the firmware has already been downloaded */
	wmt_params.op = MTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Set up the firmware which the device definitely requires */
	err = mtk_setup_firmware(hdev, bdev->data->fwname);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtksdio_func_query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (status == BTMTK_WMT_ON_DONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) bt_dev_info(hdev, "function already on");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) goto ignore_func_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* Enable Bluetooth protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) wmt_params.op = MTK_WMT_FUNC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) wmt_params.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) wmt_params.dlen = sizeof(param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) wmt_params.data = ¶m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) wmt_params.status = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) err = mtk_hci_wmt_sync(hdev, &wmt_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ignore_func_on:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) /* Apply the low power environment setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) tci_sleep.mode = 0x5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) tci_sleep.duration = cpu_to_le16(0x640);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) tci_sleep.host_duration = cpu_to_le16(0x640);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) tci_sleep.host_wakeup_pin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) tci_sleep.time_compensation = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
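	/* 0xfc7a is a MediaTek vendor-specific HCI command; the tci_sleep
	 * values above are the driver's default low power parameters for it.
	 */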
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) HCI_INIT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (IS_ERR(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) err = PTR_ERR(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
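	/* Measure how long the setup took; shifting the ns delta right by
	 * 10 approximates a conversion to microseconds (divide by 1024).
	 */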
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) rettime = ktime_get();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) delta = ktime_sub(rettime, calltime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) duration = (unsigned long long)ktime_to_ns(delta) >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
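	/* With the firmware up, hand the device over to runtime PM: mark it
	 * active and enable runtime PM with autosuspend.
	 */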
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) pm_runtime_set_autosuspend_delay(bdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) MTKBTSDIO_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) pm_runtime_use_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) err = pm_runtime_set_active(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	/* Forbid runtime autosuspend by default; it can be allowed via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 	 * enable_autosuspend module parameter or the runtime PM entry in sysfs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) pm_runtime_forbid(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) pm_runtime_enable(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (enable_autosuspend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pm_runtime_allow(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) bt_dev_info(hdev, "Device setup in %llu usecs", duration);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static int btmtksdio_shutdown(struct hci_dev *hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct btmtk_hci_wmt_params wmt_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) u8 param = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	/* Take a runtime PM reference again so that the state is consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 	 * with the one left by btmtksdio_setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* Disable the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) wmt_params.op = MTK_WMT_FUNC_CTRL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) wmt_params.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) wmt_params.dlen = sizeof(param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 	wmt_params.data = &param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) wmt_params.status = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err = mtk_hci_wmt_sync(hdev, &wmt_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
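	/* Drop the reference taken above and disable runtime PM again; it is
	 * re-enabled by the next btmtksdio_setup run.
	 */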
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) pm_runtime_put_noidle(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) pm_runtime_disable(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) switch (hci_skb_pkt_type(skb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) case HCI_COMMAND_PKT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) hdev->stat.cmd_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case HCI_ACLDATA_PKT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) hdev->stat.acl_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) case HCI_SCODATA_PKT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) hdev->stat.sco_tx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return -EILSEQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
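	/* Queue the packet and let the TX work item push it out over SDIO */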
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) skb_queue_tail(&bdev->txq, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) schedule_work(&bdev->tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static int btmtksdio_probe(struct sdio_func *func,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) const struct sdio_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct btmtksdio_dev *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
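	/* Per-chip data (e.g. the firmware name) is carried in the matching
	 * SDIO id table entry's driver_data field.
	 */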
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) bdev->data = (void *)id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!bdev->data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) bdev->dev = &func->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) bdev->func = func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) skb_queue_head_init(&bdev->txq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* Initialize and register HCI device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) hdev = hci_alloc_dev();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dev_err(&func->dev, "Can't allocate HCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) bdev->hdev = hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) hdev->bus = HCI_SDIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) hci_set_drvdata(hdev, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) hdev->open = btmtksdio_open;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) hdev->close = btmtksdio_close;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) hdev->flush = btmtksdio_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) hdev->setup = btmtksdio_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) hdev->shutdown = btmtksdio_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) hdev->send = btmtksdio_send_frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) SET_HCIDEV_DEV(hdev, &func->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
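	/* 70 (0x0046) is the Bluetooth SIG company identifier assigned to
	 * MediaTek. HCI_QUIRK_NON_PERSISTENT_SETUP tells the core that the
	 * setup() callback must be re-run on every power-on, not just once.
	 */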
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) hdev->manufacturer = 70;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) sdio_set_drvdata(func, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) err = hci_register_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) dev_err(&func->dev, "Can't register HCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) hci_free_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) 	/* pm_runtime_enable is deferred until after the firmware has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) 	 * downloaded, because the core layer may already have enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) 	 * runtime PM for this func, e.g. when host->caps contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) 	 * MMC_CAP_POWER_OFF_CARD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (pm_runtime_enabled(bdev->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) pm_runtime_disable(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * unbound SDIO functions are always suspended.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * During probe, the function is set active and the usage count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * is incremented. If the driver supports runtime PM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * it should call pm_runtime_put_noidle() in its probe routine and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * pm_runtime_get_noresume() in its remove routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	 * Hence the pm_runtime_put_noidle() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pm_runtime_put_noidle(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static void btmtksdio_remove(struct sdio_func *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct hci_dev *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	/* Be consistent with the state left by btmtksdio_probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) pm_runtime_get_noresume(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) hdev = bdev->hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) sdio_set_drvdata(func, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) hci_unregister_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) hci_free_dev(hdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static int btmtksdio_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct sdio_func *func = dev_to_sdio_func(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct btmtksdio_dev *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) bdev = sdio_get_drvdata(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
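	/* Keep the card powered across host suspend (MMC_PM_KEEP_POWER) so
	 * the firmware state is not lost.
	 */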
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) sdio_claim_host(bdev->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
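	/* Give ownership back to the firmware: set the FW-own request and
	 * poll until the driver-own bit is cleared.
	 */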
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) !(status & C_COM_DRV_OWN), 2000, 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	bt_dev_info(bdev->hdev, "return ownership to device, status (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) sdio_release_host(bdev->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int btmtksdio_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct sdio_func *func = dev_to_sdio_func(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) struct btmtksdio_dev *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) bdev = sdio_get_drvdata(func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) sdio_claim_host(bdev->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
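	/* Take ownership back from the firmware: clear the FW-own request
	 * and poll until the driver-own bit is set.
	 */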
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) status & C_COM_DRV_OWN, 2000, 1000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	bt_dev_info(bdev->hdev, "get ownership from device, status (%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) sdio_release_host(bdev->func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
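/* UNIVERSAL_DEV_PM_OPS wires the same suspend/resume handlers up for both
 * system sleep and runtime PM.
 */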
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) btmtksdio_runtime_resume, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) #define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #else /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) #define BTMTKSDIO_PM_OPS NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static struct sdio_driver btmtksdio_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) .name = "btmtksdio",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) .probe = btmtksdio_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) .remove = btmtksdio_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) .id_table = btmtksdio_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) .drv = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .pm = BTMTKSDIO_PM_OPS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) module_sdio_driver(btmtksdio_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) module_param(enable_autosuspend, bool, 0644);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) MODULE_VERSION(VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) MODULE_FIRMWARE(FIRMWARE_MT7663);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) MODULE_FIRMWARE(FIRMWARE_MT7668);